diff --git a/.github/actions/clean-disk/action.yml b/.github/actions/clean-disk/action.yml index 8bcc5f1396802..d74c3f25fc64c 100644 --- a/.github/actions/clean-disk/action.yml +++ b/.github/actions/clean-disk/action.yml @@ -31,7 +31,7 @@ runs: directories=(/usr/local/lib/android /opt/ghc) if [[ "${{ inputs.mode }}" == "full" ]]; then # remove these directories only when mode is 'full' - directories+=(/usr/share/dotnet) + directories+=(/usr/share/dotnet /opt/hostedtoolcache/CodeQL) fi emptydir=/tmp/empty$$/ mkdir $emptydir diff --git a/.github/actions/gradle-enterprise/action.yml b/.github/actions/gradle-enterprise/action.yml deleted file mode 100644 index 935e76d3cd645..0000000000000 --- a/.github/actions/gradle-enterprise/action.yml +++ /dev/null @@ -1,36 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -name: Configure Gradle Enterprise integration -description: Configure Gradle Enterprise when secret GE_ACCESS_TOKEN is available -inputs: - token: - description: 'The token for accessing Gradle Enterprise' - required: true -runs: - using: composite - steps: - - run: | - if [[ -n "${{ inputs.token }}" ]]; then - echo "::group::Configuring Gradle Enterprise for build" - cp .mvn/ge-extensions.xml .mvn/extensions.xml - echo "GRADLE_ENTERPRISE_ACCESS_KEY=${{ inputs.token }}" >> $GITHUB_ENV - echo "::endgroup::" - fi - shell: bash \ No newline at end of file diff --git a/.github/workflows/ci-go-functions.yaml b/.github/workflows/ci-go-functions.yaml index a34cb1a90908e..e347890a1d1c1 100644 --- a/.github/workflows/ci-go-functions.yaml +++ b/.github/workflows/ci-go-functions.yaml @@ -22,6 +22,7 @@ on: pull_request: branches: - master + - branch-3.0 paths: - '.github/workflows/**' - 'pulsar-function-go/**' @@ -32,7 +33,7 @@ concurrency: cancel-in-progress: true env: - MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 + MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 jobs: preconditions: diff --git a/.github/workflows/ci-maven-cache-update.yaml b/.github/workflows/ci-maven-cache-update.yaml index a5a88e2e0b8a6..f20feeea5c9db 100644 --- a/.github/workflows/ci-maven-cache-update.yaml +++ b/.github/workflows/ci-maven-cache-update.yaml @@ -42,13 
+42,14 @@ on: - cron: '30 */12 * * *' env: - MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 + MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 jobs: update-maven-dependencies-cache: name: Update Maven dependency cache for ${{ matrix.name }} env: JOB_NAME: Update Maven dependency cache for ${{ matrix.name }} + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 45 @@ -77,11 +78,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Detect changed files if: ${{ github.event_name != 'schedule' }} id: changes diff --git a/.github/workflows/ci-owasp-dependency-check.yaml b/.github/workflows/ci-owasp-dependency-check.yaml index dba1c92ca28dd..090221e699d01 100644 --- a/.github/workflows/ci-owasp-dependency-check.yaml +++ b/.github/workflows/ci-owasp-dependency-check.yaml @@ -24,7 +24,7 @@ on: workflow_dispatch: env: - MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 + MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 jobs: run-owasp-dependency-check: @@ -32,6 +32,7 @@ jobs: name: Check ${{ matrix.branch }} env: JOB_NAME: Check ${{ matrix.branch }} + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} runs-on: ubuntu-20.04 timeout-minutes: 45 strategy: @@ -39,6 +40,7 @@ jobs: matrix: include: - branch: master + - branch: branch-3.0 - branch: branch-2.11 - branch: branch-2.10 jdk: 11 @@ -56,12 +58,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - if: ${{ matrix.branch == 'master' }} - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Cache local Maven repository uses: actions/cache@v3 timeout-minutes: 5 diff --git a/.github/workflows/pulsar-ci-flaky.yaml b/.github/workflows/pulsar-ci-flaky.yaml index ea19bc3307819..16dfa4a87aa29 100644 --- a/.github/workflows/pulsar-ci-flaky.yaml +++ b/.github/workflows/pulsar-ci-flaky.yaml @@ -22,6 +22,7 @@ on: pull_request: branches: - master + - branch-3.0 schedule: - cron: '0 12 * * *' workflow_dispatch: @@ -36,7 +37,7 @@ concurrency: cancel-in-progress: true env: - MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false 
-Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 + MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 # defines the retention period for the intermediate build artifacts needed for rerunning a failed build job # it's possible to rerun individual failed jobs when the build artifacts are available # if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR @@ -94,6 +95,7 @@ jobs: env: JOB_NAME: Flaky tests suite COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}" + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} runs-on: ubuntu-20.04 timeout-minutes: 100 if: ${{ needs.preconditions.outputs.docs_only != 'true' }} @@ -104,11 +106,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} diff --git a/.github/workflows/pulsar-ci.yaml b/.github/workflows/pulsar-ci.yaml index f7dbc755264d6..eaa173e4f135e 100644 --- a/.github/workflows/pulsar-ci.yaml +++ b/.github/workflows/pulsar-ci.yaml @@ -22,6 +22,7 @@ on: pull_request: branches: - master + - branch-3.0 schedule: - cron: '0 12 * * *' workflow_dispatch: @@ -36,7 +37,7 @@ concurrency: cancel-in-progress: true env: - MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 + MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000 # defines the retention period for the intermediate build artifacts needed for rerunning a failed build job # it's possible to rerun individual failed jobs when the build artifacts are available # if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR @@ -95,6 +96,7 @@ jobs: name: Build and License check env: JOB_NAME: Build and License check + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} runs-on: ubuntu-20.04 timeout-minutes: 60 if: ${{ needs.preconditions.outputs.docs_only != 'true' }} @@ -105,11 +107,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: 
./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -175,6 +172,7 @@ jobs: env: JOB_NAME: CI - Unit - ${{ matrix.name }} COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}" + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} runs-on: ubuntu-20.04 timeout-minutes: ${{ matrix.timeout || 60 }} needs: ['preconditions', 'build-and-license-check'] @@ -192,6 +190,8 @@ jobs: group: BROKER_GROUP_2 - name: Brokers - Broker Group 3 group: BROKER_GROUP_3 + - name: Brokers - Broker Group 4 + group: BROKER_GROUP_4 - name: Brokers - Client Api group: BROKER_CLIENT_API - name: Brokers - Client Impl @@ -201,6 +201,10 @@ jobs: - name: Pulsar IO group: PULSAR_IO timeout: 75 + - name: Pulsar IO - Elastic Search + group: PULSAR_IO_ELASTIC + - name: Pulsar IO - Kafka Connect Adaptor + group: PULSAR_IO_KAFKA_CONNECT - name: Pulsar Client group: CLIENT @@ -211,11 +215,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -391,6 +390,8 @@ jobs: timeout-minutes: 60 needs: ['preconditions', 'build-and-license-check'] if: ${{ needs.preconditions.outputs.docs_only != 'true'}} + env: + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} steps: - name: checkout uses: actions/checkout@v3 @@ -398,11 +399,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -469,6 +465,7 @@ jobs: env: JOB_NAME: CI - Integration - ${{ matrix.name }} PULSAR_TEST_IMAGE_NAME: apachepulsar/java-test-image:latest + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} strategy: fail-fast: false matrix: @@ -483,6 +480,10 @@ jobs: - name: Messaging group: MESSAGING + - name: LoadBalance + group: LOADBALANCE + no_coverage: true + - name: Shade on Java 8 group: SHADE_RUN runtime_jdk: 8 @@ -513,11 +514,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -721,11 +717,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Install gh-actions-artifact-client.js uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master @@ -739,6 +730,8 @@ jobs: timeout-minutes: 60 needs: ['preconditions', 'build-and-license-check'] if: ${{ needs.preconditions.outputs.docs_only != 'true' }} + env: + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} steps: 
- name: checkout uses: actions/checkout@v3 @@ -746,11 +739,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -761,6 +749,8 @@ jobs: - name: Clean Disk uses: ./.github/actions/clean-disk + with: + mode: full - name: Cache local Maven repository uses: actions/cache@v3 @@ -798,6 +788,7 @@ jobs: # build docker image # include building of Pulsar SQL, Connectors, Offloaders and server distros mvn -B -am -pl pulsar-sql/presto-distribution,distribution/io,distribution/offloaders,distribution/server,distribution/shell,tests/docker-images/latest-version-image install \ + -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \ -Pmain,docker -Dmaven.test.skip=true -Ddocker.squash=true \ -Dspotbugs.skip=true -Dlicense.skip=true -Dcheckstyle.skip=true -Drat.skip=true @@ -850,6 +841,7 @@ jobs: env: JOB_NAME: CI - System - ${{ matrix.name }} PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} strategy: fail-fast: false matrix: @@ -874,6 +866,7 @@ jobs: - name: Pulsar IO group: PULSAR_IO + clean_disk: true - name: Sql group: SQL @@ -885,10 +878,9 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} + - name: Clean Disk when needed + if: ${{ matrix.clean_disk }} + uses: ./.github/actions/clean-disk - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks @@ -1080,6 +1072,7 @@ jobs: env: JOB_NAME: CI Flaky - System - ${{ matrix.name }} PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} strategy: fail-fast: false matrix: @@ -1089,6 +1082,7 @@ jobs: - name: Pulsar IO - Oracle group: PULSAR_IO_ORA + clean_disk: true steps: - name: checkout @@ -1097,10 +1091,9 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} + - name: Clean Disk when needed + if: ${{ matrix.clean_disk }} + uses: ./.github/actions/clean-disk - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks @@ -1207,11 +1200,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Install gh-actions-artifact-client.js uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master @@ -1225,6 +1213,8 @@ jobs: timeout-minutes: 120 needs: ['preconditions', 'integration-tests'] if: ${{ needs.preconditions.outputs.docs_only != 'true' }} + env: + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} steps: - name: checkout uses: actions/checkout@v3 @@ -1232,11 +1222,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Cache Maven dependencies uses: 
actions/cache@v3 timeout-minutes: 5 @@ -1263,6 +1248,8 @@ jobs: timeout-minutes: 120 needs: [ 'preconditions', 'integration-tests' ] if: ${{ needs.preconditions.outputs.need_owasp == 'true' }} + env: + GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} steps: - name: checkout uses: actions/checkout@v3 @@ -1270,11 +1257,6 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm - - name: Configure Gradle Enterprise - uses: ./.github/actions/gradle-enterprise - with: - token: ${{ secrets.GE_ACCESS_TOKEN }} - - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -1310,8 +1292,10 @@ jobs: cd $HOME $GITHUB_WORKSPACE/build/pulsar_ci_tool.sh restore_tar_from_github_actions_artifacts pulsar-maven-repository-binaries # Projects dependent on flume, hdfs, hbase, and presto currently excluded from the scan. - - name: run "clean verify" to trigger dependency check - run: mvn -q -B -ntp verify -PskipDocker,owasp-dependency-check -DskipTests -pl '!pulsar-sql,!distribution/io,!distribution/offloaders,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb' + - name: trigger dependency check + run: | + mvn -B -ntp verify -PskipDocker,skip-all,owasp-dependency-check -Dcheckstyle.skip=true -DskipTests \ + -pl '!pulsar-sql,!distribution/server,!distribution/io,!distribution/offloaders,!pulsar-sql/presto-distribution,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb' - name: Upload report uses: actions/upload-artifact@v3 diff --git a/.gitignore b/.gitignore index c584baaa0a0b8..cd00c44200059 100644 --- a/.gitignore +++ b/.gitignore @@ -97,4 +97,3 @@ test-reports/ # Gradle Enterprise .mvn/.gradle-enterprise/ -.mvn/extensions.xml diff --git a/.idea/vcs.xml b/.idea/vcs.xml index 99fce8b2b9812..951f00c3cc3b8 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -1,4 +1,4 @@ - + - - - - - - + + + + + + diff --git a/.mvn/ge-extensions.xml b/.mvn/extensions.xml similarity index 82% rename from .mvn/ge-extensions.xml rename to .mvn/extensions.xml index 1c7a1611c1bcc..ca4639f8053e0 100644 --- a/.mvn/ge-extensions.xml +++ b/.mvn/extensions.xml @@ -1,4 +1,4 @@ - + - + com.gradle gradle-enterprise-maven-extension - 1.16.3 + 1.17.1 com.gradle diff --git a/.mvn/gradle-enterprise.xml b/.mvn/gradle-enterprise.xml index 2667402c23cdb..c8626080d0866 100644 --- a/.mvn/gradle-enterprise.xml +++ b/.mvn/gradle-enterprise.xml @@ -1,4 +1,4 @@ - + - + https://ge.apache.org false diff --git a/bin/bookkeeper b/bin/bookkeeper index fb516a98acdc2..0cc07dd49aba5 100755 --- a/bin/bookkeeper +++ b/bin/bookkeeper @@ -168,7 +168,7 @@ OPTS="$OPTS -Dlog4j.configurationFile=`basename $BOOKIE_LOG_CONF`" # Allow Netty to use reflection access OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true" -IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'` +IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' ) # Start --add-opens options # '--add-opens' option is not supported in jdk8 if [[ -z "$IS_JAVA_8" ]]; then diff --git a/bin/function-localrunner b/bin/function-localrunner index 45a37cb306794..2e0aa0f6dffe2 100755 --- a/bin/function-localrunner +++ b/bin/function-localrunner @@ -40,13 +40,15 @@ PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"} PULSAR_GC=${PULSAR_GC:-"-XX:+UseZGC -XX:+PerfDisableSharedMem 
-XX:+AlwaysPreTouch"} # Garbage collection log. -IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'` -# java version has space, use [[ -n $PARAM ]] to judge if variable exists -if [[ -n "$IS_JAVA_8" ]]; then - PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"} -else -# After jdk 9, gc log param should config like this. Ignoring version less than jdk 8 +IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' ) +if [[ -z "$IS_JAVA_8" ]]; then + # >= JDK 9 PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xlog:gc:logs/pulsar_gc_%p.log:time,uptime:filecount=10,filesize=20M"} + # '--add-opens' option is not supported in JDK 1.8 + OPTS="$OPTS --add-opens java.base/sun.net=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED" +else + # == JDK 1.8 + PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"} fi # Extra options to be passed to the jvm diff --git a/bin/pulsar b/bin/pulsar index a033de947d4b3..20ed1f7f22b0f 100755 --- a/bin/pulsar +++ b/bin/pulsar @@ -291,7 +291,7 @@ OPTS="$OPTS -Dzookeeper.clientTcpKeepAlive=true" # Allow Netty to use reflection access OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true" -IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'` +IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' ) # Start --add-opens options # '--add-opens' option is not supported in jdk8 if [[ -z "$IS_JAVA_8" ]]; then @@ -307,6 +307,8 @@ if [[ -z "$IS_JAVA_8" ]]; then OPTS="$OPTS --add-opens java.management/sun.management=ALL-UNNAMED" # MBeanStatsGenerator OPTS="$OPTS --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED" + # LinuxInfoUtils + OPTS="$OPTS --add-opens java.base/jdk.internal.platform=ALL-UNNAMED" fi OPTS="-cp $PULSAR_CLASSPATH $OPTS" diff --git a/bin/pulsar-admin-common.sh b/bin/pulsar-admin-common.sh index 8223ac5b3bf24..8aa21c00f634d 100755 --- a/bin/pulsar-admin-common.sh +++ b/bin/pulsar-admin-common.sh @@ -91,7 +91,7 @@ PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH" OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF`" OPTS="$OPTS -Djava.net.preferIPv4Stack=true" -IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'` +IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' ) # Start --add-opens options # '--add-opens' option is not supported in jdk8 if [[ -z "$IS_JAVA_8" ]]; then diff --git a/bin/pulsar-perf b/bin/pulsar-perf index 47c02bc3d67d5..bdc1dc1ed8b8c 100755 --- a/bin/pulsar-perf +++ b/bin/pulsar-perf @@ -134,7 +134,7 @@ PULSAR_CLASSPATH="$PULSAR_JAR:$PULSAR_CLASSPATH:$PULSAR_EXTRA_CLASSPATH" PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH" OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF` -Djava.net.preferIPv4Stack=true" -IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'` +IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' ) # Start --add-opens options # '--add-opens' option is not supported in jdk8 if [[ -z "$IS_JAVA_8" ]]; then diff --git a/bouncy-castle/bc/LICENSE b/bouncy-castle/bc/LICENSE index 5921755346e9e..dae8f16df5b82 100644 --- a/bouncy-castle/bc/LICENSE +++ b/bouncy-castle/bc/LICENSE @@ -205,6 +205,6 @@ This projects includes binary packages with the following licenses: Bouncy Castle License * Bouncy Castle -- licenses/LICENSE-bouncycastle.txt - - 
org.bouncycastle-bcpkix-jdk15on-1.60.jar - - org.bouncycastle-bcprov-jdk15on-1.60.jar - - org.bouncycastle-bcprov-ext-jdk15on-1.60.jar + - org.bouncycastle-bcpkix-jdk18on-1.75.jar + - org.bouncycastle-bcprov-jdk18on-1.75.jar + - org.bouncycastle-bcprov-ext-jdk18on-1.75.jar diff --git a/bouncy-castle/bc/pom.xml b/bouncy-castle/bc/pom.xml index cc7e7952b69f0..ef6637bac8917 100644 --- a/bouncy-castle/bc/pom.xml +++ b/bouncy-castle/bc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.0.0-SNAPSHOT .. - bouncy-castle-bc Apache Pulsar :: Bouncy Castle :: BC - ${project.groupId} @@ -39,23 +36,19 @@ ${project.version} provided - org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on ${bouncycastle.version} - org.bouncycastle - bcprov-ext-jdk15on + bcprov-ext-jdk18on ${bouncycastle.version} - - de.ntcomputer @@ -73,7 +66,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/bouncy-castle/bcfips-include-test/pom.xml b/bouncy-castle/bcfips-include-test/pom.xml index 44f0ada4630d7..19a2a2941bce1 100644 --- a/bouncy-castle/bcfips-include-test/pom.xml +++ b/bouncy-castle/bcfips-include-test/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.0.0-SNAPSHOT .. - bcfips-include-test Pulsar Bouncy Castle FIPS Test Broker and client runs auth include BC FIPS verison - ${project.groupId} @@ -39,7 +37,6 @@ ${project.version} test - ${project.groupId} pulsar-broker @@ -53,7 +50,6 @@ test-jar test - ${project.groupId} pulsar-broker @@ -66,7 +62,6 @@ test - ${project.groupId} @@ -74,7 +69,6 @@ ${project.version} pkg - @@ -85,6 +79,28 @@ true + + maven-resources-plugin + + + copy-resources + test-compile + + copy-resources + + + ${project.build.testOutputDirectory}/certificate-authority + true + + + ${project.parent.parent.basedir}/tests/certificate-authority + false + + + + + + diff --git a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java index 330d4fbc06897..e8e12838defef 100644 --- a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java +++ b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java @@ -37,11 +37,6 @@ import org.testng.annotations.BeforeMethod; public class TlsProducerConsumerBase extends ProducerConsumerBase { - protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; private final String clusterName = "use"; @BeforeMethod(alwaysRun = true) @@ -63,9 +58,9 @@ protected void cleanup() throws Exception { protected void internalSetUpForBroker() throws Exception { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + 
conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setClusterName(clusterName); conf.setTlsRequireTrustedClientCertOnConnect(true); Set tlsProtocols = Sets.newConcurrentHashSet(); @@ -81,12 +76,12 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl) } ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(lookupUrl) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) .operationTimeout(1000, TimeUnit.MILLISECONDS); if (addCertificates) { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); clientBuilder.authentication(AuthenticationTls.class.getName(), authParams); } pulsarClient = clientBuilder.build(); @@ -94,15 +89,15 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl) protected void internalSetUpForNamespace() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); if (admin != null) { admin.close(); } admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), authParams).build()); admin.clusters().createCluster(clusterName, ClusterData.builder() .serviceUrl(brokerUrl.toString()) diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem deleted file mode 100644 index e2b44e0bf0c42..0000000000000 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem +++ /dev/null @@ -1,71 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 15537474201172114493 (0xd7a0327703a8fc3d) - Signature Algorithm: sha256WithRSAEncryption - Issuer: CN=CARoot - Validity - Not Before: Feb 22 06:26:33 2023 GMT - Not After : Feb 19 06:26:33 2033 GMT - Subject: C=US, ST=CA, O=Apache, OU=Apache Pulsar, CN=localhost - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:af:bf:b7:2d:98:ad:9d:f6:da:a3:13:d4:62:0f: - 98:be:1c:a2:89:22:ba:6f:d5:fd:1f:67:e3:91:03: - 98:80:81:0e:ed:d8:f6:70:7f:2c:36:68:3d:53:ea: - 58:3a:a6:d5:89:66:4b:bd:1e:57:71:13:6d:4b:11: - e5:40:a5:76:84:24:92:40:58:80:96:c9:1f:2c:c4: - 55:eb:a3:79:73:70:5c:37:9a:89:ed:2f:ba:6b:e3: - 82:7c:69:4a:02:54:8b:81:5e:3c:bf:4c:8a:cb:ea: - 2c:5e:83:e7:b7:10:08:5f:82:58:a3:89:d1:da:92: - ba:2a:28:ee:30:28:3f:5b:ae:10:71:96:c7:e1:12: - c5:b0:1a:ad:44:6f:44:3a:11:4a:9a:3c:0f:8d:06: - 80:7b:34:ef:3f:6c:f4:5e:c5:44:54:1e:c8:dd:c7: - 80:85:80:d9:68:e6:c6:53:03:77:e1:fe:18:61:07: - 77:05:4c:ed:59:bc:5d:41:38:6a:ef:5d:a1:b2:60: - 98:d4:48:28:95:02:8a:0e:fd:cf:7b:1b:d2:11:cc: - 
10:0c:50:73:d7:cc:38:6c:83:dd:79:26:aa:90:c8: - 9b:84:86:bc:59:e9:62:69:f4:98:1b:c4:80:78:7e: - a0:1a:81:9d:d2:e1:66:dd:c4:cc:fc:63:04:ac:ec: - a7:35 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Alternative Name: - DNS:localhost, IP Address:127.0.0.1 - Signature Algorithm: sha256WithRSAEncryption - 5f:e0:73:7b:5e:db:c0:8b:5e:4c:43:5f:80:94:ca:0b:f8:e9: - 9b:93:91:3d:b1:3a:99:ce:1c:fb:15:32:68:3e:b9:9c:52:d0: - 4b:7f:17:09:ec:af:6b:05:3e:e2:a3:e6:cc:bb:53:d7:ea:4a: - 82:3c:4e:a5:37:ca:f4:1e:38:e2:d6:a5:98:4d:ee:b9:e2:9a: - 48:d2:9f:0a:bc:61:42:70:22:b9:fb:cd:73:72:fb:94:13:ac: - 6e:c5:b6:4b:24:ef:0f:df:2d:e6:56:da:b2:76:e8:16:be:7f: - 3f:1b:99:6e:32:3e:b9:f4:2b:35:72:c7:e4:c6:a5:92:68:c0: - 1f:a0:f7:17:fd:a3:b6:73:98:d3:ea:1c:af:ea:7d:f8:a0:27: - 40:dc:4e:8b:13:28:ba:65:60:c5:90:57:e8:54:c1:83:b4:9d: - f0:ae:2a:de:27:57:e5:a2:e5:f4:87:1c:df:6b:dc:7b:43:ff: - b6:be:0b:3b:b2:8b:1a:36:dc:e3:57:aa:52:ef:23:d6:50:d7: - e4:72:8f:a0:0a:43:de:3d:f2:42:5b:fa:ed:1f:8d:0e:cf:c5: - 6a:ce:3b:8e:fd:6b:68:01:a9:f9:d2:0e:0d:ac:39:8d:f5:6c: - 80:f8:49:af:bb:b9:d4:81:b9:f3:b2:b6:ce:75:1c:20:e8:6a: - 53:dc:26:86 ------BEGIN CERTIFICATE----- -MIIDCTCCAfGgAwIBAgIJANegMncDqPw9MA0GCSqGSIb3DQEBCwUAMBExDzANBgNV -BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzNaFw0zMzAyMTkwNjI2MzNaMFcxCzAJ -BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYwFAYDVQQL -Ew1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQCvv7ctmK2d9tqjE9RiD5i+HKKJIrpv1f0fZ+OR -A5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21LEeVApXaEJJJAWICWyR8sxFXr -o3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixeg+e3EAhfglijidHakroqKO4w -KD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/bPRexURUHsjdx4CFgNlo5sZT -A3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO/c97G9IRzBAMUHPXzDhsg915 -JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8YwSs7Kc1AgMBAAGjHjAcMBoG -A1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAX+Bz -e17bwIteTENfgJTKC/jpm5ORPbE6mc4c+xUyaD65nFLQS38XCeyvawU+4qPmzLtT -1+pKgjxOpTfK9B444talmE3uueKaSNKfCrxhQnAiufvNc3L7lBOsbsW2SyTvD98t -5lbasnboFr5/PxuZbjI+ufQrNXLH5MalkmjAH6D3F/2jtnOY0+ocr+p9+KAnQNxO -ixMoumVgxZBX6FTBg7Sd8K4q3idX5aLl9Icc32vce0P/tr4LO7KLGjbc41eqUu8j -1lDX5HKPoApD3j3yQlv67R+NDs/Fas47jv1raAGp+dIODaw5jfVsgPhJr7u51IG5 -87K2znUcIOhqU9wmhg== ------END CERTIFICATE----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem deleted file mode 100644 index 004bf8e21a7a9..0000000000000 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCvv7ctmK2d9tqj -E9RiD5i+HKKJIrpv1f0fZ+ORA5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21L -EeVApXaEJJJAWICWyR8sxFXro3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixe -g+e3EAhfglijidHakroqKO4wKD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/ -bPRexURUHsjdx4CFgNlo5sZTA3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO -/c97G9IRzBAMUHPXzDhsg915JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8 -YwSs7Kc1AgMBAAECggEAAaWEK9MwXTiA1+JJrRmETtOp2isPIBkbI/4vLZ6hASM0 -ZpoPxQIMAf58BJs/dF03xu/EaeMs4oxSC9ABG9fxAk/tZtjta3w65Ip6W5jOfHxj -AMpb3HMEBhq9kDjUTq1IGVAutYQcEMkC3WfS9e4ahfqMpguWgbu6LsbvZFgcL9mv -pGnKv9YVe6Xk6isvqtq6G1af0rd7c//xF0i0e/qEo83Buok3gLEZOELZbcRxjUYc -jnyglnXnwkGjuL4E3wgS3l73ZKsb6+AYoqhMPVz8t4/PN3tTrsBJKOSYo8KzIm0U -ek9T8XmPbP0cuheRxp9Dp8TXJJQZK0N9jz+EL0ogQQKBgQDnavm8GpR4pap9cDOc -+YI5s823b507pNdSU8elO9gLsP0JlFzv+sqghVko29r85D7Vn3MkgYTy0S4ANLCs 
-0NFDY8N2QH6U1dTkk1QXZydVZDuKJ5SSpC4v+Vafl8yDxhB4Nlxhbm9vJEMfLcXh -2kL6UlAuFDtYD0AdczwnHu5DjQKBgQDCauocm55FpcyDMMBO2CjurxcjBYS3S1xT -Bz+sPtxJLjlKbAt8kSHUQcCcX9zhrQBfsT38LATCmKaOFqUW5/PPh2LcrxiMqlL1 -OJBUJ3Te2LTjlUn8r+DHv/69UIh5tchwRr3YgB0DuIs7jfmr4VfiOWTBtPVhoGFR -1Wt60j30SQKBgHzreS26J2VNAFBALgxRf6OIVMbtgDG/FOCDCyU9vazp+F2gcd61 -QYYPFYcBzx9uUiDctroBFHRCyJMh3jEbc6ruAogl3m6XUxmkEeOkMk5dEerM3N2f -tLL+5Gy385U6aI+LwKhzhcG4EGeXPNdjC362ykNldnddnB2Jo/H2N2XNAoGAdnft -xpbxP+GDGKIZXTIM5zzcLWQMdiC+1n1BSHVZiGJZWMczzKknYw7aDq+/iekApE79 -xW8RS373ZvfXi3i2Mcx+6pjrrbOQL4tTL2SHq8+DknaDCi4mG7IbyUKMlxW1WO1S -e929UGogtZ6S+DCte9WbVwosyFuRUetpvgLk67kCgYBWetihZjgBWrqVYT24TTRH -KxzSzH1JgzzF9qgTdlhXDv9hC+Kc0uTKsgViesDqVuCOjkwzY5OQr9c6duO0fwwP -qNk/qltdgjMC5iiv7duyukfbEuqKEdGGer9HFb7en96dZdVQJpYHaaslAGurtD80 -ejCQZgzR2XaHSuIQb0IUVQ== ------END PRIVATE KEY----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem deleted file mode 100644 index 4ed454ec52a52..0000000000000 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem +++ /dev/null @@ -1,78 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 15358526754272834781 (0xd52472b5c5c3f4dd) - Signature Algorithm: sha256WithRSAEncryption - Issuer: CN=CARoot - Validity - Not Before: Feb 22 06:26:32 2023 GMT - Not After : Feb 19 06:26:32 2033 GMT - Subject: CN=CARoot - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:d0:87:45:0b:b4:83:11:ab:5a:b4:b6:1c:15:d4: - 92:6a:0c:ac:3b:76:da:ff:8d:61:1b:bd:96:bd:d7: - b0:70:23:87:d4:00:19:b2:e5:63:b7:80:58:4a:a4: - d8:a8:a6:4f:eb:c8:8c:54:07:f5:56:52:23:64:fc: - 66:54:39:f1:33:d0:e5:cc:b6:40:c8:d7:9a:9f:0e: - c4:aa:57:b0:b3:e2:41:61:54:ca:1f:90:3b:18:ef: - 60:d2:dc:ee:34:29:33:08:1b:37:4b:c4:ca:7e:cb: - 94:7f:50:c4:8d:16:2f:90:03:94:07:bf:cf:52:ff: - 24:54:56:ac:74:6c:d3:31:8c:ce:ef:b3:14:5a:5b: - 8a:0c:83:2d:e1:f7:4d:60:2f:a1:4d:85:38:96:7f: - 01:2f:9a:99:c7:2e:3d:09:4d:5e:53:df:fd:29:9f: - ff:6b:e4:c2:a1:e3:67:85:db:e2:02:4d:6f:29:d4: - e1:b3:a2:34:71:e0:90:dd:3f:b3:3f:86:41:8c:97: - 09:e6:c3:de:a0:0e:d3:d4:3e:ce:ea:58:70:e6:9f: - 24:a8:19:ca:df:61:b8:9c:c3:4e:53:d0:69:96:44: - 84:76:2b:99:65:08:06:42:d4:b2:76:a7:2f:69:12: - d5:c2:65:a6:ff:2c:77:73:00:e7:97:a5:77:6b:8a: - 9c:3f - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: critical - CA:TRUE - X509v3 Subject Key Identifier: - A7:55:6B:51:10:75:CE:4E:5B:0B:64:FF:A9:6D:23:FB:57:88:59:69 - X509v3 Authority Key Identifier: - keyid:A7:55:6B:51:10:75:CE:4E:5B:0B:64:FF:A9:6D:23:FB:57:88:59:69 - DirName:/CN=CARoot - serial:D5:24:72:B5:C5:C3:F4:DD - - Signature Algorithm: sha256WithRSAEncryption - 21:b1:4d:2b:14:1e:5a:91:5d:28:9e:ba:cb:ed:f1:96:da:c3: - fa:8d:b5:74:e4:c5:fb:2f:3e:39:b4:a6:59:69:dd:84:64:a8: - f0:e0:39:d2:ef:87:cc:8b:09:9f:0a:84:1f:d0:96:9c:4b:64: - ea:08:09:26:1c:84:f4:06:5f:5e:b9:ba:b3:3c:6c:81:e0:93: - 46:89:07:51:95:36:77:96:76:5d:a6:68:71:bb:60:88:a7:83: - 27:7c:66:5d:64:36:cb:8e:bd:02:f7:fb:52:63:83:2f:fe:57: - 4c:d5:0c:1b:ea:ef:88:ad:8c:a9:d4:b3:2c:b8:c4:e2:90:cb: - 0f:24:0e:df:fc:2a:c6:83:08:49:45:b0:41:85:0e:b4:6f:f7: - 18:56:7b:a5:0b:f6:1b:7f:72:88:ee:c8:ef:b3:e3:3e:f0:68: - 1b:c9:55:bb:4d:21:65:6b:9e:5c:dd:60:4b:7f:f1:84:f8:67: - 51:c2:60:88:42:6e:6c:9c:14:b8:96:b0:18:10:97:2c:94:e7: - 79:14:7b:d1:a2:a4:d8:94:84:ac:a9:ca:17:95:c2:27:8b:2b: - d8:19:6a:14:4b:c3:03:a6:30:55:40:bd:ce:0c:c2:d5:af:7d: - 
6d:65:89:6b:74:ed:21:12:f1:aa:c9:c9:ba:da:9a:ca:14:6c: - 39:f4:02:32 ------BEGIN CERTIFICATE----- -MIIDGjCCAgKgAwIBAgIJANUkcrXFw/TdMA0GCSqGSIb3DQEBCwUAMBExDzANBgNV -BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzJaFw0zMzAyMTkwNjI2MzJaMBExDzAN -BgNVBAMMBkNBUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANCH -RQu0gxGrWrS2HBXUkmoMrDt22v+NYRu9lr3XsHAjh9QAGbLlY7eAWEqk2KimT+vI -jFQH9VZSI2T8ZlQ58TPQ5cy2QMjXmp8OxKpXsLPiQWFUyh+QOxjvYNLc7jQpMwgb -N0vEyn7LlH9QxI0WL5ADlAe/z1L/JFRWrHRs0zGMzu+zFFpbigyDLeH3TWAvoU2F -OJZ/AS+amccuPQlNXlPf/Smf/2vkwqHjZ4Xb4gJNbynU4bOiNHHgkN0/sz+GQYyX -CebD3qAO09Q+zupYcOafJKgZyt9huJzDTlPQaZZEhHYrmWUIBkLUsnanL2kS1cJl -pv8sd3MA55eld2uKnD8CAwEAAaN1MHMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUp1VrURB1zk5bC2T/qW0j+1eIWWkwQQYDVR0jBDowOIAUp1VrURB1zk5bC2T/ -qW0j+1eIWWmhFaQTMBExDzANBgNVBAMMBkNBUm9vdIIJANUkcrXFw/TdMA0GCSqG -SIb3DQEBCwUAA4IBAQAhsU0rFB5akV0onrrL7fGW2sP6jbV05MX7Lz45tKZZad2E -ZKjw4DnS74fMiwmfCoQf0JacS2TqCAkmHIT0Bl9eubqzPGyB4JNGiQdRlTZ3lnZd -pmhxu2CIp4MnfGZdZDbLjr0C9/tSY4Mv/ldM1Qwb6u+IrYyp1LMsuMTikMsPJA7f -/CrGgwhJRbBBhQ60b/cYVnulC/Ybf3KI7sjvs+M+8GgbyVW7TSFla55c3WBLf/GE -+GdRwmCIQm5snBS4lrAYEJcslOd5FHvRoqTYlISsqcoXlcIniyvYGWoUS8MDpjBV -QL3ODMLVr31tZYlrdO0hEvGqycm62prKFGw59AIy ------END CERTIFICATE----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem deleted file mode 100644 index 3cf236c401255..0000000000000 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem +++ /dev/null @@ -1,71 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 15537474201172114494 (0xd7a0327703a8fc3e) - Signature Algorithm: sha256WithRSAEncryption - Issuer: CN=CARoot - Validity - Not Before: Feb 22 06:26:33 2023 GMT - Not After : Feb 19 06:26:33 2033 GMT - Subject: C=US, ST=CA, O=Apache, OU=Apache Pulsar, CN=superUser - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:cd:43:7d:98:40:f9:b0:5b:bc:ae:db:c0:0b:ad: - 26:90:96:e0:62:38:ed:68:b1:70:46:3b:de:44:f9: - 14:51:86:10:eb:ca:90:e7:88:e8:f9:91:85:e0:dd: - b5:b4:14:b9:78:e3:86:d5:54:6d:68:ec:14:92:b4: - f8:22:5b:05:3d:ed:31:25:65:08:05:84:ca:e6:0c: - 21:12:58:32:c7:1a:60:a3:4f:d2:4a:9e:28:19:7c: - 45:84:00:8c:89:dc:de:8a:e5:4f:88:91:cc:a4:f1: - 81:45:4c:7d:c2:ff:e2:c1:89:c6:12:73:95:e2:36: - bd:db:ae:8b:5a:68:6a:90:51:de:2b:88:5f:aa:67: - f4:a8:e3:63:dc:be:19:82:cc:9d:7f:e6:8d:fb:82: - be:22:01:3d:56:13:3b:5b:04:b4:e8:c5:18:e6:2e: - 0d:fa:ba:4a:8d:e8:c6:5a:a1:51:9a:4a:62:d7:af: - dd:b4:fc:e2:d5:cd:ae:99:6c:5c:61:56:0b:d7:0c: - 1a:77:5c:f5:3a:6a:54:b5:9e:33:ac:a9:75:28:9a: - 76:af:d0:7a:57:00:1b:91:13:31:fd:42:88:21:47: - 05:10:01:2f:59:bb:c7:3a:d9:e1:58:4c:1b:6c:71: - b6:98:ef:dd:03:82:58:a3:32:dc:90:a1:b6:a6:1e: - e1:0b - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Alternative Name: - DNS:localhost, IP Address:127.0.0.1 - Signature Algorithm: sha256WithRSAEncryption - b8:fc:d3:8f:8a:e0:6b:74:57:e2:a3:79:b2:18:60:0b:2c:05: - f9:e3:ae:dd:e9:ad:52:88:52:73:b4:12:b0:39:90:65:12:f5: - 95:0e:5f:4b:f2:06:4a:57:ab:e1:f9:b1:34:68:83:d7:d7:5e: - 69:0a:16:44:ea:1d:97:53:51:10:51:8b:ec:0a:b3:c8:a3:3d: - 85:4d:f4:8f:7d:b3:b5:72:e4:9e:d7:f3:01:bf:66:e1:40:92: - 54:63:16:b6:b5:66:ed:30:38:94:1d:1a:8f:28:34:27:ab:c9: - 5f:d5:16:7e:e4:f5:93:d2:19:35:44:0a:c4:2e:6a:25:38:1d: - ee:5a:c8:29:fa:96:dc:95:82:38:9e:36:3a:68:34:7b:4e:d9: - fa:0d:b2:88:a2:6c:4f:03:18:a7:e3:41:67:38:de:e5:f6:ff: - 
2a:1c:f0:ec:1a:02:a7:e8:4e:3a:c3:04:72:f8:6a:4f:28:a6: - cf:0b:a2:db:33:74:d1:10:9e:ec:b4:ac:f8:b1:24:f4:ef:0e: - 05:e4:9d:1b:9a:40:f7:09:66:9c:9d:86:8b:76:96:46:e8:d1: - dc:10:c7:7d:0b:69:41:dc:a7:8e:e3:a3:36:e3:42:63:93:8c: - 91:80:0d:27:11:1c:2d:ae:fb:92:88:6c:6b:09:40:1a:30:dd: - 8f:ac:0f:62 ------BEGIN CERTIFICATE----- -MIIDCTCCAfGgAwIBAgIJANegMncDqPw+MA0GCSqGSIb3DQEBCwUAMBExDzANBgNV -BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzNaFw0zMzAyMTkwNjI2MzNaMFcxCzAJ -BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYwFAYDVQQL -Ew1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlzdXBlclVzZXIwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQDNQ32YQPmwW7yu28ALrSaQluBiOO1osXBGO95E -+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSStPgiWwU97TElZQgFhMrmDCES -WDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFFTH3C/+LBicYSc5XiNr3brota -aGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1WEztbBLToxRjmLg36ukqN6MZa -oVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1njOsqXUomnav0HpXABuREzH9 -QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQobamHuELAgMBAAGjHjAcMBoG -A1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAuPzT -j4rga3RX4qN5shhgCywF+eOu3emtUohSc7QSsDmQZRL1lQ5fS/IGSler4fmxNGiD -19deaQoWROodl1NREFGL7AqzyKM9hU30j32ztXLkntfzAb9m4UCSVGMWtrVm7TA4 -lB0ajyg0J6vJX9UWfuT1k9IZNUQKxC5qJTgd7lrIKfqW3JWCOJ42Omg0e07Z+g2y -iKJsTwMYp+NBZzje5fb/Khzw7BoCp+hOOsMEcvhqTyimzwui2zN00RCe7LSs+LEk -9O8OBeSdG5pA9wlmnJ2Gi3aWRujR3BDHfQtpQdynjuOjNuNCY5OMkYANJxEcLa77 -kohsawlAGjDdj6wPYg== ------END CERTIFICATE----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem deleted file mode 100644 index 3835b3eacccc0..0000000000000 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDNQ32YQPmwW7yu -28ALrSaQluBiOO1osXBGO95E+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSS -tPgiWwU97TElZQgFhMrmDCESWDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFF -TH3C/+LBicYSc5XiNr3brotaaGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1W -EztbBLToxRjmLg36ukqN6MZaoVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1 -njOsqXUomnav0HpXABuREzH9QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQ -obamHuELAgMBAAECggEBALGnokJuqiz7mTj2NSdl+6TVEOuyPbiJKpV/J4cm1XEh -ye9qaTQcCRhH3UmcWrG75jM9KevloLRY8A1x1/lUMhtA+XJWGTU9k6a8BLut3nT4 -3X87jNTMQgSczEXNe9WudmZcxhN7rVVtOOdTpt1pP0cnCWna5HTf0D8cuLvM975j -r1YGTjKsCF1W+tp6ZAIIMfJkUI2qBRKvSxVCSs1vZBraox3yUVnq9oRLHxZZoqOd -d51G5phRtn6ReVPBdT8fGUBEGg3jKxTu2/vLQMUyHy0hyCAM20gzOP4FIc2g+QZU -y42byAuc89m0OrdRWsmzHCOxcq9DwY9npaz1RscR/2ECgYEA9bHJQ0Y1afpS5gn2 -KnXenRIw9oal1utQZnohCEJ4um+K/BCEHtDnI825LPNf34IKM2rSmssvHrYN51o0 -92j9lHHXsf6MVluwsTsIu8MtNaJ1BLt96dub4ScGT6vvzObKTwsajUfIHk+FNsKq -zps8yh1q0qyyfAcvR82+Xr6JIsMCgYEA1d+RHGewi/Ub/GCG99A1KFKsgbiIJnWB -IFmrcyPWignhzDUcw2SV9XqAzeK8EOIHNq3e5U/tkA7aCWxtLb5UsQ8xvmwQY2cy -X2XvSdIhO4K2PgRLgjlzZ8RHSULglqyjB2i6TjwjFl8TsRzYr6JlV6+2cMujw4Bl -g3a8gz071BkCgYBLP7BMkmw5kRliqxph1sffg3rLhmG0eU2elTkYtoMTVqZSnRxZ -89FW/eMBCWkLo2BMbyMhlalQ1qFbgh1GyTkhBdzx/uwsZtiu7021dAmcq6z7ThE6 -VrBfPPyJ2jcPon/DxbrUGnAIGILMSsLVlGYB4RCehZYEto6chz8O9Xw60QKBgCnd -us1BqviqwZC04JbQJie/j09RbS2CIQXRJ9PBNzUMXCwaVYgWP5ivI1mqQcBYTqsw -fAqNi+aAUcQ4emLS+Ec0vzsUclzTDbRJAv+DZ8f7fWtEcfeLAYFVldLMiaRVJRDF -OnsoIII3mGY6TFyNQKNanS8VXfheQQDsFFjoera5AoGBALXYEXkESXpw4LT6qJFz -ktQuTZDfS6LtR14/+NkYL9c5wBC4Otkg4bNbT8xGlUjethRfpkm8xRTB6zfC1/p/ -Cg6YU1cwqlkRurAhE3PEv1dCc1IDbzou8xnwqHrd6sGPDQmQ3aEtU5eJhDZKIZfx -nQqPGK92+Jtne7+W1mFZooxs ------END PRIVATE KEY----- diff --git 
a/bouncy-castle/bcfips/pom.xml b/bouncy-castle/bcfips/pom.xml index bd5d64bd84191..f3d341ab6132c 100644 --- a/bouncy-castle/bcfips/pom.xml +++ b/bouncy-castle/bcfips/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.0.0-SNAPSHOT .. - bouncy-castle-bcfips Apache Pulsar :: Bouncy Castle :: BC-FIPS - ${project.groupId} @@ -39,20 +36,17 @@ ${project.version} provided - org.bouncycastle bc-fips ${bouncycastle.bc-fips.version} - org.bouncycastle bcpkix-fips ${bouncycastle.bcpkix-fips.version} - diff --git a/bouncy-castle/pom.xml b/bouncy-castle/pom.xml index 9f8ace79b77c3..b94f33946df2b 100644 --- a/bouncy-castle/pom.xml +++ b/bouncy-castle/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - @@ -42,16 +41,13 @@ - bouncy-castle-parent Apache Pulsar :: Bouncy Castle :: Parent - bc bcfips - bcfips-include-test diff --git a/build/build_java_test_image.sh b/build/build_java_test_image.sh index 0747e6dacb82a..459bf26f98eff 100755 --- a/build/build_java_test_image.sh +++ b/build/build_java_test_image.sh @@ -27,5 +27,6 @@ if [[ "$(docker version -f '{{.Server.Experimental}}' 2>/dev/null)" == "true" ]] SQUASH_PARAM="-Ddocker.squash=true" fi mvn -am -pl tests/docker-images/java-test-image -Pcore-modules,-main,integrationTests,docker \ + -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \ -Dmaven.test.skip=true -DskipSourceReleaseAssembly=true -Dspotbugs.skip=true -Dlicense.skip=true $SQUASH_PARAM \ "$@" install \ No newline at end of file diff --git a/build/docker/Dockerfile b/build/docker/Dockerfile index 7660325567748..3d9fc9c832bda 100644 --- a/build/docker/Dockerfile +++ b/build/docker/Dockerfile @@ -17,7 +17,7 @@ # under the License. 
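A note on the UBUNTU_MIRROR / UBUNTU_SECURITY_MIRROR pass-through added to build/build_java_test_image.sh above (the same flags are threaded through _ci_mvn and the latest-version-image Maven command elsewhere in this patch): the values are read from the environment and handed to Maven as -D properties so the Docker image builds can point APT at a nearby mirror. A minimal local sketch; the mirror URLs below are placeholders, not values mandated by this patch, and leaving the variables unset keeps the images' stock APT sources:

    # Placeholder mirrors - substitute ones close to the build machine, or leave unset.
    export UBUNTU_MIRROR="http://mirrors.example.com/ubuntu/"
    export UBUNTU_SECURITY_MIRROR="http://mirrors.example.com/ubuntu-security/"
    # The script forwards both values as -DUBUNTU_MIRROR / -DUBUNTU_SECURITY_MIRROR to the Maven docker build.
    ./build/build_java_test_image.sh
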
# -FROM ubuntu:20.04 +FROM ubuntu:22.04 # prepare the directory for pulsar related files RUN mkdir /pulsar diff --git a/build/pulsar_ci_tool.sh b/build/pulsar_ci_tool.sh index 61199eda2c5d8..d946edd395789 100755 --- a/build/pulsar_ci_tool.sh +++ b/build/pulsar_ci_tool.sh @@ -46,7 +46,8 @@ function ci_print_thread_dumps() { # runs maven function _ci_mvn() { - mvn -B -ntp "$@" + mvn -B -ntp -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \ + "$@" } # runs OWASP Dependency Check for all projects diff --git a/build/regenerate_certs_for_tests.sh b/build/regenerate_certs_for_tests.sh index fff1c057060f3..9582a7496cd1d 100755 --- a/build/regenerate_certs_for_tests.sh +++ b/build/regenerate_certs_for_tests.sh @@ -68,13 +68,6 @@ reissue_certificate_no_subject \ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-key.pem \ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-cert.pem -generate_ca -cp ca-cert.pem $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem -reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem \ - $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem -reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem \ - $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem - generate_ca cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-key.pem \ diff --git a/build/run_integration_group.sh b/build/run_integration_group.sh index bc1255d8d68aa..9372d1ecb9317 100755 --- a/build/run_integration_group.sh +++ b/build/run_integration_group.sh @@ -156,6 +156,10 @@ test_group_messaging() { mvn_run_integration_test "$@" -DintegrationTestSuiteFile=pulsar-tls.xml -DintegrationTests } +test_group_loadbalance() { + mvn_run_integration_test "$@" -DintegrationTestSuiteFile=pulsar-loadbalance.xml -DintegrationTests +} + test_group_plugin() { mvn_run_integration_test "$@" -DintegrationTestSuiteFile=pulsar-plugin.xml -DintegrationTests } diff --git a/build/run_unit_group.sh b/build/run_unit_group.sh index ba49820ed1d33..17d0efeed9937 100755 --- a/build/run_unit_group.sh +++ b/build/run_unit_group.sh @@ -87,6 +87,10 @@ function test_group_broker_group_3() { mvn_test -pl pulsar-broker -Dgroups='broker-admin' } +function test_group_broker_group_4() { + mvn_test -pl pulsar-broker -Dgroups='cluster-migration' +} + function test_group_broker_client_api() { mvn_test -pl pulsar-broker -Dgroups='broker-api' } @@ -188,6 +192,18 @@ function test_group_pulsar_io() { echo "::endgroup::" } +function test_group_pulsar_io_elastic() { + echo "::group::Running elastic-search tests" + mvn_test --install -Ppulsar-io-elastic-tests,-main + echo "::endgroup::" +} + +function test_group_pulsar_io_kafka_connect() { + echo "::group::Running Pulsar IO Kafka connect adaptor tests" + mvn_test --install -Ppulsar-io-kafka-connect-tests,-main + echo "::endgroup::" +} + function list_test_groups() { declare -F | awk '{print $NF}' | sort | grep -E '^test_group_' | sed 's/^test_group_//g' | tr '[:lower:]' '[:upper:]' } diff --git a/buildtools/pom.xml b/buildtools/pom.xml 
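On the new unit-test groups added to build/run_unit_group.sh just above: BROKER_GROUP_4 scopes pulsar-broker tests in the 'cluster-migration' group, while the two new Pulsar IO groups build with their dedicated Maven profiles. A rough local sketch, assuming the script's usual dispatch-by-group-name convention (the mvn_test wrapper itself is defined elsewhere in the script and is not shown in this hunk):

    # Assumed invocation style, mirroring the existing groups listed by list_test_groups.
    ./build/run_unit_group.sh BROKER_GROUP_4
    # Roughly equivalent direct Maven invocation for the same group filter
    # (may require the rest of the build to be installed locally first):
    mvn -pl pulsar-broker test -Dgroups='cluster-migration'
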
index b22abf286f693..afb431d6d837d 100644 --- a/buildtools/pom.xml +++ b/buildtools/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache apache - 23 - + 29 + - - org.apache.pulsar + io.streamnative buildtools 3.0.0-SNAPSHOT jar Pulsar Build Tools - - ${maven.build.timestamp} + 2024-03-04T21:08:12Z 1.8 1.8 - 3.0.0-M3 + 3.1.0 2.18.0 1.7.32 - 7.7.0 + 7.7.1 3.11 4.1 - 3.4.0 8.37 3.1.2 - 4.1.89.Final + 4.1.100.Final 4.2.3 - 31.0.1-jre + 32.1.1-jre 1.10.12 - 1.32 + 2.0 3.12.4 --add-opens java.base/jdk.internal.loader=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED + --add-opens java.base/jdk.internal.platform=ALL-UNNAMED - @@ -72,9 +68,7 @@ - - org.yaml snakeyaml @@ -95,7 +89,6 @@ guice ${guice.version} - org.testng testng @@ -147,7 +140,6 @@ ${mockito.version} - @@ -183,11 +175,17 @@ ${test.additional.args} + + + org.apache.maven.surefire + surefire-testng + ${surefire.version} + + org.apache.maven.plugins maven-shade-plugin - ${maven-shade-plugin.version} true true @@ -250,13 +248,72 @@ **/proto/* + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + + + attach-sources + + jar-no-fork + + + + + + maven-gpg-plugin + + + sign-artifacts + verify + + sign + + + + org.apache.maven.wagon wagon-ssh-external - 2.10 + 3.5.3 + + + ossrh + https://s01.oss.sonatype.org/content/repositories/snapshots + + + ossrh + https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ + + diff --git a/buildtools/src/main/resources/log4j2.xml b/buildtools/src/main/resources/log4j2.xml index 184f58487eaf0..9e12d754fe7b5 100644 --- a/buildtools/src/main/resources/log4j2.xml +++ b/buildtools/src/main/resources/log4j2.xml @@ -1,4 +1,4 @@ - + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + diff --git a/buildtools/src/main/resources/pulsar/checkstyle.xml b/buildtools/src/main/resources/pulsar/checkstyle.xml index b3812ca8cccd7..14c8933540ac0 100644 --- a/buildtools/src/main/resources/pulsar/checkstyle.xml +++ b/buildtools/src/main/resources/pulsar/checkstyle.xml @@ -1,4 +1,4 @@ - + - - + - - - - - - - - - - - - + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + + + + + - - - - + IMPORT CHECKS - - - - + --> + + + + - - - - - + + + + + + + - - - - + + - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + } else --> - else --> - - - - - - - - - - - - - - - - - + + + + + + + + + + - - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + - - - - - + + + diff --git a/buildtools/src/main/resources/pulsar/suppressions.xml b/buildtools/src/main/resources/pulsar/suppressions.xml index 7c78988db3e90..18a4e64d8836e 100644 --- a/buildtools/src/main/resources/pulsar/suppressions.xml +++ b/buildtools/src/main/resources/pulsar/suppressions.xml @@ -1,4 +1,4 @@ - + - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/conf/broker.conf b/conf/broker.conf index 292b1ef1683b7..1c4e2a26afb8b 100644 --- a/conf/broker.conf +++ b/conf/broker.conf @@ -537,6 +537,9 @@ brokerServiceCompactionThresholdInBytes=0 # If the execution time of the compaction phase one loop exceeds this time, the compaction will not proceed. brokerServiceCompactionPhaseOneLoopTimeInSeconds=30 +# Whether retain null-key message during topic compaction +topicCompactionRetainNullKey=true + # Whether to enable the delayed delivery for messages. # If disabled, messages will be immediately delivered and there will # be no tracking overhead. @@ -577,7 +580,8 @@ delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000 # The max number of delayed message index bucket, # after reaching the max buckets limitation, the adjacent buckets will be merged. -delayedDeliveryMaxNumBuckets=50 +# (disable with value -1) +delayedDeliveryMaxNumBuckets=-1 # Size of the lookahead window to use when detecting if all the messages in the topic # have a fixed delay. diff --git a/conf/entry_location_rocksdb.conf b/conf/entry_location_rocksdb.conf index 31bd58506ef75..42d916ded378f 100644 --- a/conf/entry_location_rocksdb.conf +++ b/conf/entry_location_rocksdb.conf @@ -52,6 +52,8 @@ max_bytes_for_level_base=268435456 # set by jni: options.setTargetFileSizeBase target_file_size_base=67108864 + # set by jni: options.setLevelCompactionDynamicLevelBytes + level_compaction_dynamic_level_bytes=true [TableOptions/BlockBasedTable "default"] # set by jni: tableOptions.setBlockSize @@ -66,5 +68,3 @@ filter_policy=rocksdb.BloomFilter:10:false # set by jni: tableOptions.setCacheIndexAndFilterBlocks cache_index_and_filter_blocks=true - # set by jni: options.setLevelCompactionDynamicLevelBytes - level_compaction_dynamic_level_bytes=true \ No newline at end of file diff --git a/conf/filesystem_offload_core_site.xml b/conf/filesystem_offload_core_site.xml index d26cec2cc60f0..3c7e6d94bbe50 100644 --- a/conf/filesystem_offload_core_site.xml +++ b/conf/filesystem_offload_core_site.xml @@ -1,3 +1,4 @@ + - - - fs.defaultFS - - - - hadoop.tmp.dir - pulsar - - - io.file.buffer.size - 4096 - - - io.seqfile.compress.blocksize - 1000000 - - - io.seqfile.compression.type - BLOCK - - - io.map.index.interval - 128 - - + + + fs.defaultFS + + + + hadoop.tmp.dir + pulsar + + + io.file.buffer.size + 4096 + + + io.seqfile.compress.blocksize + 1000000 + + + io.seqfile.compression.type + BLOCK + + + io.map.index.interval + 128 + diff --git a/conf/functions_log4j2.xml b/conf/functions_log4j2.xml index 6902a3acd8736..a1dcc6a1f654a 100644 --- a/conf/functions_log4j2.xml +++ b/conf/functions_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-instance - 30 - - - pulsar.log.appender - RollingFile - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - RollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? 
- - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}*log.gz - - - 30d - - - - - - BkRollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}.bk*log.gz - - - 30d - - - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - BkRollingFile - - - - ${sys:pulsar.log.level} - - ${sys:pulsar.log.appender} - ${sys:pulsar.log.level} - - - + pulsar-functions-instance + 30 + + + pulsar.log.appender + RollingFile + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + RollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? + + + + + ${sys:pulsar.function.log.dir} + 2 + + ${sys:pulsar.function.log.file}*log.gz + + + 30d + + + + + + BkRollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? + + + + + ${sys:pulsar.function.log.dir} + 2 + + ${sys:pulsar.function.log.file}.bk*log.gz + + + 30d + + + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + BkRollingFile + + + + ${sys:pulsar.log.level} + + ${sys:pulsar.log.appender} + ${sys:pulsar.log.level} + + + diff --git a/conf/functions_worker.yml b/conf/functions_worker.yml index bb15e0ca416da..460becd9a94a3 100644 --- a/conf/functions_worker.yml +++ b/conf/functions_worker.yml @@ -43,6 +43,16 @@ metadataStoreOperationTimeoutSeconds: 30 # Metadata store cache expiry time in seconds metadataStoreCacheExpirySeconds: 300 +# Specifies if the function worker should use classloading for validating submissions for built-in +# connectors and functions. This is required for validateConnectorConfig to take effect. +# Default is false. +enableClassloadingOfBuiltinFiles: false + +# Specifies if the function worker should use classloading for validating submissions for external +# connectors and functions. This is required for validateConnectorConfig to take effect. +# Default is false. 
+enableClassloadingOfExternalFiles: false + ################################ # Function package management ################################ @@ -398,9 +408,20 @@ saslJaasServerRoleTokenSignerSecretPath: ######################## connectorsDirectory: ./connectors +# Whether to enable referencing connectors directory files by file url in connector (sink/source) creation +enableReferencingConnectorDirectoryFiles: true +# Regex patterns for enabling creation of connectors by referencing packages in matching http/https urls +additionalEnabledConnectorUrlPatterns: [] functionsDirectory: ./functions - -# Should connector config be validated during submission +# Whether to enable referencing functions directory files by file url in functions creation +enableReferencingFunctionsDirectoryFiles: true +# Regex patterns for enabling creation of functions by referencing packages in matching http/https urls +additionalEnabledFunctionsUrlPatterns: [] + +# Enables extended validation for connector config with fine-grain annotation based validation +# during submission. Classloading with either enableClassloadingOfExternalFiles or +# enableClassloadingOfBuiltinFiles must be enabled on the worker for this to take effect. +# Default is false. validateConnectorConfig: false # Whether to initialize distributed log metadata by runtime. diff --git a/conf/proxy.conf b/conf/proxy.conf index a33db8bbf7a30..613022b5dc2e1 100644 --- a/conf/proxy.conf +++ b/conf/proxy.conf @@ -28,17 +28,19 @@ metadataStoreUrl= # The metadata store URL for the configuration data. If empty, we fall back to use metadataStoreUrl configurationMetadataStoreUrl= -# If Service Discovery is Disabled this url should point to the discovery service provider. +# If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the discovery service +# provider, and does not support multi urls yet. # The URL must begin with pulsar:// for plaintext or with pulsar+ssl:// for TLS. brokerServiceURL= brokerServiceURLTLS= -# These settings are unnecessary if `zookeeperServers` is specified +# If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the discovery service +# provider, and does not support multi urls yet. brokerWebServiceURL= brokerWebServiceURLTLS= -# If function workers are setup in a separate cluster, configure the following 2 settings -# to point to the function workers cluster +# If function workers are setup in a separate cluster, configure the following 2 settings. This url should point to +# the discovery service provider of the function workers cluster, and does not support multi urls yet. functionWorkerWebServiceURL= functionWorkerWebServiceURLTLS= @@ -369,6 +371,8 @@ zooKeeperCacheExpirySeconds=-1 ### --- Metrics --- ### +# Whether to enable the proxy's /metrics and /proxy-stats http endpoints +enableProxyStatsEndpoints=true # Whether the '/metrics' endpoint requires authentication. 
Defaults to true authenticateMetricsEndpoint=true # Enable cache metrics data, default value is false diff --git a/conf/schema_example.conf b/conf/schema_example.json similarity index 100% rename from conf/schema_example.conf rename to conf/schema_example.json diff --git a/conf/standalone.conf b/conf/standalone.conf index f141946c29f4e..c055264ef3fc8 100644 --- a/conf/standalone.conf +++ b/conf/standalone.conf @@ -1264,4 +1264,8 @@ delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000 # The max number of delayed message index bucket, # after reaching the max buckets limitation, the adjacent buckets will be merged. -delayedDeliveryMaxNumBuckets=50 +# (disable with value -1) +delayedDeliveryMaxNumBuckets=-1 + +# Whether retain null-key message during topic compaction +topicCompactionRetainNullKey=true diff --git a/conf/websocket.conf b/conf/websocket.conf index 2e2824a838c6f..490cff2722ee5 100644 --- a/conf/websocket.conf +++ b/conf/websocket.conf @@ -194,3 +194,6 @@ zooKeeperSessionTimeoutMillis=-1 # ZooKeeper cache expiry time in seconds # Deprecated: use metadataStoreCacheExpirySeconds zooKeeperCacheExpirySeconds=-1 + +# CryptoKeyReader factory classname to support encryption at websocket. +cryptoKeyReaderFactoryClassName= diff --git a/distribution/io/pom.xml b/distribution/io/pom.xml index 99105bef950d5..47d6b06a2bf8a 100644 --- a/distribution/io/pom.xml +++ b/distribution/io/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.0.0-SNAPSHOT .. - pulsar-io-distribution pom Pulsar :: Distribution :: IO - ${project.groupId} @@ -46,7 +43,6 @@ provided - @@ -69,9 +65,32 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - generator-connector-config @@ -151,5 +170,4 @@ - diff --git a/distribution/io/src/assemble/io.xml b/distribution/io/src/assemble/io.xml index 5b652170fdbb5..71d5aaf69f76a 100644 --- a/distribution/io/src/assemble/io.xml +++ b/distribution/io/src/assemble/io.xml @@ -1,3 +1,4 @@ + - + bin dir @@ -38,48 +37,120 @@ . 
644 - - - ${basedir}/../../pulsar-io/cassandra/target/pulsar-io-cassandra-${project.version}.nar - ${basedir}/../../pulsar-io/twitter/target/pulsar-io-twitter-${project.version}.nar - ${basedir}/../../pulsar-io/kafka/target/pulsar-io-kafka-${project.version}.nar - ${basedir}/../../pulsar-io/http/target/pulsar-io-http-${project.version}.nar - ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar - ${basedir}/../../pulsar-io/rabbitmq/target/pulsar-io-rabbitmq-${project.version}.nar - ${basedir}/../../pulsar-io/nsq/target/pulsar-io-nsq-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/sqlite/target/pulsar-io-jdbc-sqlite-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/mariadb/target/pulsar-io-jdbc-mariadb-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/clickhouse/target/pulsar-io-jdbc-clickhouse-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/postgres/target/pulsar-io-jdbc-postgres-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/openmldb/target/pulsar-io-jdbc-openmldb-${project.version}.nar - ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/batch-data-generator/target/pulsar-io-batch-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/aerospike/target/pulsar-io-aerospike-${project.version}.nar - ${basedir}/../../pulsar-io/elastic-search/target/pulsar-io-elastic-search-${project.version}.nar - ${basedir}/../../pulsar-io/kafka-connect-adaptor-nar/target/pulsar-io-kafka-connect-adaptor-${project.version}.nar - ${basedir}/../../pulsar-io/hbase/target/pulsar-io-hbase-${project.version}.nar - ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar - ${basedir}/../../pulsar-io/hdfs2/target/pulsar-io-hdfs2-${project.version}.nar - ${basedir}/../../pulsar-io/hdfs3/target/pulsar-io-hdfs3-${project.version}.nar - ${basedir}/../../pulsar-io/file/target/pulsar-io-file-${project.version}.nar - ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/canal/target/pulsar-io-canal-${project.version}.nar - ${basedir}/../../pulsar-io/netty/target/pulsar-io-netty-${project.version}.nar - ${basedir}/../../pulsar-io/mongo/target/pulsar-io-mongo-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mysql/target/pulsar-io-debezium-mysql-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/postgres/target/pulsar-io-debezium-postgres-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/oracle/target/pulsar-io-debezium-oracle-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mssql/target/pulsar-io-debezium-mssql-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mongodb/target/pulsar-io-debezium-mongodb-${project.version}.nar - ${basedir}/../../pulsar-io/influxdb/target/pulsar-io-influxdb-${project.version}.nar - ${basedir}/../../pulsar-io/redis/target/pulsar-io-redis-${project.version}.nar - ${basedir}/../../pulsar-io/flume/target/pulsar-io-flume-${project.version}.nar - ${basedir}/../../pulsar-io/solr/target/pulsar-io-solr-${project.version}.nar - ${basedir}/../../pulsar-io/dynamodb/target/pulsar-io-dynamodb-${project.version}.nar - ${basedir}/../../pulsar-io/alluxio/target/pulsar-io-alluxio-${project.version}.nar + + ${basedir}/../../pulsar-io/cassandra/target/pulsar-io-cassandra-${project.version}.nar + + + ${basedir}/../../pulsar-io/twitter/target/pulsar-io-twitter-${project.version}.nar + + + 
${basedir}/../../pulsar-io/kafka/target/pulsar-io-kafka-${project.version}.nar + + + ${basedir}/../../pulsar-io/http/target/pulsar-io-http-${project.version}.nar + + + ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar + + + ${basedir}/../../pulsar-io/rabbitmq/target/pulsar-io-rabbitmq-${project.version}.nar + + + ${basedir}/../../pulsar-io/nsq/target/pulsar-io-nsq-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/sqlite/target/pulsar-io-jdbc-sqlite-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/mariadb/target/pulsar-io-jdbc-mariadb-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/clickhouse/target/pulsar-io-jdbc-clickhouse-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/postgres/target/pulsar-io-jdbc-postgres-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/openmldb/target/pulsar-io-jdbc-openmldb-${project.version}.nar + + + ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/batch-data-generator/target/pulsar-io-batch-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/aerospike/target/pulsar-io-aerospike-${project.version}.nar + + + ${basedir}/../../pulsar-io/elastic-search/target/pulsar-io-elastic-search-${project.version}.nar + + + ${basedir}/../../pulsar-io/kafka-connect-adaptor-nar/target/pulsar-io-kafka-connect-adaptor-${project.version}.nar + + + ${basedir}/../../pulsar-io/hbase/target/pulsar-io-hbase-${project.version}.nar + + + ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar + + + ${basedir}/../../pulsar-io/hdfs2/target/pulsar-io-hdfs2-${project.version}.nar + + + ${basedir}/../../pulsar-io/hdfs3/target/pulsar-io-hdfs3-${project.version}.nar + + + ${basedir}/../../pulsar-io/file/target/pulsar-io-file-${project.version}.nar + + + ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/canal/target/pulsar-io-canal-${project.version}.nar + + + ${basedir}/../../pulsar-io/netty/target/pulsar-io-netty-${project.version}.nar + + + ${basedir}/../../pulsar-io/mongo/target/pulsar-io-mongo-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mysql/target/pulsar-io-debezium-mysql-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/postgres/target/pulsar-io-debezium-postgres-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/oracle/target/pulsar-io-debezium-oracle-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mssql/target/pulsar-io-debezium-mssql-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mongodb/target/pulsar-io-debezium-mongodb-${project.version}.nar + + + ${basedir}/../../pulsar-io/influxdb/target/pulsar-io-influxdb-${project.version}.nar + + + ${basedir}/../../pulsar-io/redis/target/pulsar-io-redis-${project.version}.nar + + + ${basedir}/../../pulsar-io/flume/target/pulsar-io-flume-${project.version}.nar + + + ${basedir}/../../pulsar-io/solr/target/pulsar-io-solr-${project.version}.nar + + + ${basedir}/../../pulsar-io/dynamodb/target/pulsar-io-dynamodb-${project.version}.nar + + + ${basedir}/../../pulsar-io/alluxio/target/pulsar-io-alluxio-${project.version}.nar + diff --git a/distribution/offloaders/pom.xml b/distribution/offloaders/pom.xml index 1e86758ed5a6a..b6058a3570dc1 100644 --- a/distribution/offloaders/pom.xml +++ b/distribution/offloaders/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + 
io.streamnative distribution 3.0.0-SNAPSHOT .. - pulsar-offloader-distribution pom Pulsar :: Distribution :: Offloader - ${project.groupId} @@ -66,7 +63,6 @@ - @@ -90,6 +86,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/offloaders/src/assemble/offloaders.xml b/distribution/offloaders/src/assemble/offloaders.xml index 38f7eee906064..19c60f68c21d4 100644 --- a/distribution/offloaders/src/assemble/offloaders.xml +++ b/distribution/offloaders/src/assemble/offloaders.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -38,13 +37,11 @@ . 644 - ${basedir}/../../tiered-storage/jcloud/target/tiered-storage-jcloud-${project.version}.nar offloaders 644 - ${basedir}/../../tiered-storage/file-system/target/tiered-storage-file-system-${project.version}.nar offloaders diff --git a/distribution/pom.xml b/distribution/pom.xml index 9782f269284bf..90ed0bf4a57cb 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - distribution pom Pulsar :: Distribution - - main @@ -47,7 +43,6 @@ shell - core-modules @@ -55,7 +50,6 @@ - @@ -65,6 +59,30 @@ ${skipBuildDistribution} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/server/pom.xml b/distribution/server/pom.xml index 6b225bfc00c0b..e32c0dfdf05a0 100644 --- a/distribution/server/pom.xml +++ b/distribution/server/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.0.0-SNAPSHOT .. - pulsar-server-distribution pom Pulsar :: Distribution :: Server - ${project.groupId} pulsar-broker ${project.version} - ${project.groupId} pulsar-proxy ${project.version} - ${project.groupId} pulsar-broker-auth-oidc ${project.version} - ${project.groupId} pulsar-broker-auth-sasl ${project.version} - ${project.groupId} pulsar-client-auth-sasl ${project.version} - jline jline ${jline.version} - org.apache.zookeeper zookeeper-prometheus-metrics @@ -89,7 +80,6 @@ - ${project.groupId} pulsar-package-bookkeeper-storage @@ -101,13 +91,11 @@ - ${project.groupId} pulsar-package-filesystem-storage ${project.version} - ${project.groupId} pulsar-client-tools @@ -119,75 +107,67 @@ - ${project.groupId} pulsar-testclient ${project.version} - org.apache.logging.log4j log4j-api - org.apache.logging.log4j log4j-core - org.apache.logging.log4j log4j-web - io.dropwizard.metrics metrics-core - io.dropwizard.metrics metrics-graphite + + + amqp-client + com.rabbitmq + + - io.dropwizard.metrics metrics-jvm - org.xerial.snappy snappy-java - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - org.apache.logging.log4j log4j-slf4j-impl - org.apache.bookkeeper.stats prometheus-metrics-provider - io.prometheus simpleclient_log4j2 - ${project.groupId} bouncy-castle-bc ${project.version} pkg - ${project.groupId} @@ -202,7 +182,6 @@ - ${project.groupId} pulsar-functions-worker @@ -219,7 +198,6 @@ - ${project.groupId} @@ -234,7 +212,6 @@ - ${project.groupId} @@ -247,19 +224,20 @@ - io.grpc grpc-all - + + org.bouncycastle + bcpkix-jdk18on + io.perfmark perfmark-api compile - org.apache.bookkeeper.http @@ -281,7 +259,6 @@ vertx-web - @@ -322,6 +299,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git 
a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt index c321e54de2e01..87216c9e32250 100644 --- a/distribution/server/src/assemble/LICENSE.bin.txt +++ b/distribution/server/src/assemble/LICENSE.bin.txt @@ -246,34 +246,34 @@ The Apache Software License, Version 2.0 * JCommander -- com.beust-jcommander-1.82.jar * High Performance Primitive Collections for Java -- com.carrotsearch-hppc-0.9.1.jar * Jackson - - com.fasterxml.jackson.core-jackson-annotations-2.13.4.jar - - com.fasterxml.jackson.core-jackson-core-2.13.4.jar - - com.fasterxml.jackson.core-jackson-databind-2.13.4.2.jar - - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.13.4.jar - - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.13.4.jar - - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.13.4.jar - - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.13.4.jar - - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.13.4.jar - - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.13.4.jar - - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.13.4.jar - - com.fasterxml.jackson.module-jackson-module-parameter-names-2.13.4.jar + - com.fasterxml.jackson.core-jackson-annotations-2.14.2.jar + - com.fasterxml.jackson.core-jackson-core-2.14.2.jar + - com.fasterxml.jackson.core-jackson-databind-2.14.2.jar + - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.14.2.jar + - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.14.2.jar + - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.14.2.jar + - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.14.2.jar + - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.14.2.jar + - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.14.2.jar + - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.14.2.jar + - com.fasterxml.jackson.module-jackson-module-parameter-names-2.14.2.jar * Caffeine -- com.github.ben-manes.caffeine-caffeine-2.9.1.jar * Conscrypt -- org.conscrypt-conscrypt-openjdk-uber-2.5.2.jar - * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.0.1.jar - * Bitbucket -- org.bitbucket.b_c-jose4j-0.7.6.jar + * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.9.0.jar + * Bitbucket -- org.bitbucket.b_c-jose4j-0.9.3.jar * Gson - com.google.code.gson-gson-2.8.9.jar - io.gsonfire-gson-fire-1.8.5.jar * Guava - - com.google.guava-guava-31.0.1-jre.jar + - com.google.guava-guava-32.1.1-jre.jar - com.google.guava-failureaccess-1.0.1.jar - com.google.guava-listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar * J2ObjC Annotations -- com.google.j2objc-j2objc-annotations-1.3.jar * Netty Reactive Streams -- com.typesafe.netty-netty-reactive-streams-2.0.6.jar * Swagger - - io.swagger-swagger-annotations-1.6.2.jar - - io.swagger-swagger-core-1.6.2.jar - - io.swagger-swagger-models-1.6.2.jar + - io.swagger-swagger-annotations-1.6.10.jar + - io.swagger-swagger-core-1.6.10.jar + - io.swagger-swagger-models-1.6.10.jar * DataSketches - com.yahoo.datasketches-memory-0.8.3.jar - com.yahoo.datasketches-sketches-core-0.8.3.jar @@ -285,41 +285,40 @@ The Apache Software License, Version 2.0 - commons-lang-commons-lang-2.6.jar - commons-logging-commons-logging-1.1.1.jar - org.apache.commons-commons-collections4-4.4.jar - - org.apache.commons-commons-compress-1.21.jar + - org.apache.commons-commons-compress-1.26.0.jar - org.apache.commons-commons-lang3-3.11.jar - org.apache.commons-commons-text-1.10.0.jar * Netty - - 
io.netty-netty-buffer-4.1.89.Final.jar - - io.netty-netty-codec-4.1.89.Final.jar - - io.netty-netty-codec-dns-4.1.89.Final.jar - - io.netty-netty-codec-http-4.1.89.Final.jar - - io.netty-netty-codec-http2-4.1.89.Final.jar - - io.netty-netty-codec-socks-4.1.89.Final.jar - - io.netty-netty-codec-haproxy-4.1.89.Final.jar - - io.netty-netty-common-4.1.89.Final.jar - - io.netty-netty-handler-4.1.89.Final.jar - - io.netty-netty-handler-proxy-4.1.89.Final.jar - - io.netty-netty-resolver-4.1.89.Final.jar - - io.netty-netty-resolver-dns-4.1.89.Final.jar - - io.netty-netty-resolver-dns-classes-macos-4.1.89.Final.jar - - io.netty-netty-resolver-dns-native-macos-4.1.89.Final-osx-aarch_64.jar - - io.netty-netty-resolver-dns-native-macos-4.1.89.Final-osx-x86_64.jar - - io.netty-netty-transport-4.1.89.Final.jar - - io.netty-netty-transport-classes-epoll-4.1.89.Final.jar - - io.netty-netty-transport-native-epoll-4.1.89.Final-linux-x86_64.jar - - io.netty-netty-transport-native-epoll-4.1.89.Final.jar - - io.netty-netty-transport-native-unix-common-4.1.89.Final.jar - - io.netty-netty-transport-native-unix-common-4.1.89.Final-linux-x86_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-linux-aarch_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-linux-x86_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-osx-aarch_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-osx-x86_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-windows-x86_64.jar - - io.netty-netty-tcnative-classes-2.0.56.Final.jar - - io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.18.Final.jar - - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.18.Final-linux-x86_64.jar - - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.18.Final-linux-aarch_64.jar + - io.netty-netty-buffer-4.1.100.Final.jar + - io.netty-netty-codec-4.1.100.Final.jar + - io.netty-netty-codec-dns-4.1.100.Final.jar + - io.netty-netty-codec-http-4.1.100.Final.jar + - io.netty-netty-codec-http2-4.1.100.Final.jar + - io.netty-netty-codec-socks-4.1.100.Final.jar + - io.netty-netty-codec-haproxy-4.1.100.Final.jar + - io.netty-netty-common-4.1.100.Final.jar + - io.netty-netty-handler-4.1.100.Final.jar + - io.netty-netty-handler-proxy-4.1.100.Final.jar + - io.netty-netty-resolver-4.1.100.Final.jar + - io.netty-netty-resolver-dns-4.1.100.Final.jar + - io.netty-netty-resolver-dns-classes-macos-4.1.100.Final.jar + - io.netty-netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar + - io.netty-netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar + - io.netty-netty-transport-4.1.100.Final.jar + - io.netty-netty-transport-classes-epoll-4.1.100.Final.jar + - io.netty-netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar + - io.netty-netty-transport-native-unix-common-4.1.100.Final.jar + - io.netty-netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar + - io.netty-netty-tcnative-classes-2.0.61.Final.jar + - 
io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar + - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar + - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar * Prometheus client - io.prometheus.jmx-collector-0.16.1.jar - io.prometheus-simpleclient-0.16.0.jar @@ -346,34 +345,34 @@ The Apache Software License, Version 2.0 - net.java.dev.jna-jna-jpms-5.12.1.jar - net.java.dev.jna-jna-platform-jpms-5.12.1.jar * BookKeeper - - org.apache.bookkeeper-bookkeeper-common-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-proto-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-server-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-tools-framework-4.16.0.jar - - org.apache.bookkeeper-circe-checksum-4.16.0.jar - - org.apache.bookkeeper-cpu-affinity-4.16.0.jar - - org.apache.bookkeeper-statelib-4.16.0.jar - - org.apache.bookkeeper-stream-storage-api-4.16.0.jar - - org.apache.bookkeeper-stream-storage-common-4.16.0.jar - - org.apache.bookkeeper-stream-storage-java-client-4.16.0.jar - - org.apache.bookkeeper-stream-storage-java-client-base-4.16.0.jar - - org.apache.bookkeeper-stream-storage-proto-4.16.0.jar - - org.apache.bookkeeper-stream-storage-server-4.16.0.jar - - org.apache.bookkeeper-stream-storage-service-api-4.16.0.jar - - org.apache.bookkeeper-stream-storage-service-impl-4.16.0.jar - - org.apache.bookkeeper.http-http-server-4.16.0.jar - - org.apache.bookkeeper.http-vertx-http-server-4.16.0.jar - - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.0.jar - - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.0.jar - - org.apache.distributedlog-distributedlog-common-4.16.0.jar - - org.apache.distributedlog-distributedlog-core-4.16.0-tests.jar - - org.apache.distributedlog-distributedlog-core-4.16.0.jar - - org.apache.distributedlog-distributedlog-protocol-4.16.0.jar - - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.0.jar - - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.0.jar - - org.apache.bookkeeper-native-io-4.16.0.jar + - org.apache.bookkeeper-bookkeeper-common-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-proto-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-server-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-tools-framework-4.16.4.jar + - org.apache.bookkeeper-circe-checksum-4.16.4.jar + - org.apache.bookkeeper-cpu-affinity-4.16.4.jar + - org.apache.bookkeeper-statelib-4.16.4.jar + - org.apache.bookkeeper-stream-storage-api-4.16.4.jar + - org.apache.bookkeeper-stream-storage-common-4.16.4.jar + - org.apache.bookkeeper-stream-storage-java-client-4.16.4.jar + - org.apache.bookkeeper-stream-storage-java-client-base-4.16.4.jar + - org.apache.bookkeeper-stream-storage-proto-4.16.4.jar + - org.apache.bookkeeper-stream-storage-server-4.16.4.jar + - org.apache.bookkeeper-stream-storage-service-api-4.16.4.jar + - org.apache.bookkeeper-stream-storage-service-impl-4.16.4.jar + - org.apache.bookkeeper.http-http-server-4.16.4.jar + - org.apache.bookkeeper.http-vertx-http-server-4.16.4.jar + - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.4.jar + - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.4.jar + - org.apache.distributedlog-distributedlog-common-4.16.4.jar + - org.apache.distributedlog-distributedlog-core-4.16.4-tests.jar + - 
org.apache.distributedlog-distributedlog-core-4.16.4.jar + - org.apache.distributedlog-distributedlog-protocol-4.16.4.jar + - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.4.jar + - org.apache.bookkeeper-native-io-4.16.4.jar * Apache HTTP Client - org.apache.httpcomponents-httpclient-4.5.13.jar - org.apache.httpcomponents-httpcore-4.4.15.jar @@ -383,59 +382,62 @@ The Apache Software License, Version 2.0 - org.asynchttpclient-async-http-client-2.12.1.jar - org.asynchttpclient-async-http-client-netty-utils-2.12.1.jar * Jetty - - org.eclipse.jetty-jetty-client-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-continuation-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-http-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-io-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-proxy-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-security-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-server-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-servlet-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-servlets-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-util-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-util-ajax-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-websocket-api-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-websocket-client-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-websocket-common-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-websocket-server-9.4.48.v20220622.jar - - org.eclipse.jetty.websocket-websocket-servlet-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.48.v20220622.jar - - org.eclipse.jetty-jetty-alpn-server-9.4.48.v20220622.jar - * SnakeYaml -- org.yaml-snakeyaml-1.32.jar + - org.eclipse.jetty-jetty-client-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-continuation-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-http-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-io-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-proxy-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-security-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-server-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-servlet-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-servlets-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-util-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-util-ajax-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-websocket-api-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-websocket-client-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-websocket-common-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-websocket-server-9.4.53.v20231009.jar + - org.eclipse.jetty.websocket-websocket-servlet-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.53.v20231009.jar + - org.eclipse.jetty-jetty-alpn-server-9.4.53.v20231009.jar + * SnakeYaml -- org.yaml-snakeyaml-2.0.jar * RocksDB - org.rocksdb-rocksdbjni-7.9.2.jar * Google Error Prone Annotations - com.google.errorprone-error_prone_annotations-2.5.1.jar * Apache Thrift - org.apache.thrift-libthrift-0.14.2.jar * OkHttp3 - com.squareup.okhttp3-logging-interceptor-4.9.3.jar - com.squareup.okhttp3-okhttp-4.9.3.jar - * Okio - com.squareup.okio-okio-2.8.0.jar + * Okio + - com.squareup.okio-okio-3.4.0.jar + - com.squareup.okio-okio-jvm-3.4.0.jar * 
Javassist -- org.javassist-javassist-3.25.0-GA.jar * Kotlin Standard Lib - - org.jetbrains.kotlin-kotlin-stdlib-1.4.32.jar - - org.jetbrains.kotlin-kotlin-stdlib-common-1.4.32.jar - - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.4.32.jar - - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.4.32.jar + - org.jetbrains.kotlin-kotlin-stdlib-1.6.0.jar + - org.jetbrains.kotlin-kotlin-stdlib-common-1.6.0.jar + - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.6.0.jar + - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.6.0.jar - org.jetbrains-annotations-13.0.jar * gRPC - - io.grpc-grpc-all-1.45.1.jar - - io.grpc-grpc-auth-1.45.1.jar - - io.grpc-grpc-context-1.45.1.jar - - io.grpc-grpc-core-1.45.1.jar - - io.grpc-grpc-netty-1.45.1.jar - - io.grpc-grpc-protobuf-1.45.1.jar - - io.grpc-grpc-protobuf-lite-1.45.1.jar - - io.grpc-grpc-stub-1.45.1.jar - - io.grpc-grpc-alts-1.45.1.jar - - io.grpc-grpc-api-1.45.1.jar - - io.grpc-grpc-grpclb-1.45.1.jar - - io.grpc-grpc-netty-shaded-1.45.1.jar - - io.grpc-grpc-services-1.45.1.jar - - io.grpc-grpc-xds-1.45.1.jar - - io.grpc-grpc-rls-1.45.1.jar - - com.google.auto.service-auto-service-annotations-1.0.jar + - io.grpc-grpc-all-1.55.3.jar + - io.grpc-grpc-auth-1.55.3.jar + - io.grpc-grpc-context-1.55.3.jar + - io.grpc-grpc-core-1.55.3.jar + - io.grpc-grpc-netty-1.55.3.jar + - io.grpc-grpc-protobuf-1.55.3.jar + - io.grpc-grpc-protobuf-lite-1.55.3.jar + - io.grpc-grpc-stub-1.55.3.jar + - io.grpc-grpc-alts-1.55.3.jar + - io.grpc-grpc-api-1.55.3.jar + - io.grpc-grpc-grpclb-1.55.3.jar + - io.grpc-grpc-netty-shaded-1.55.3.jar + - io.grpc-grpc-services-1.55.3.jar + - io.grpc-grpc-xds-1.55.3.jar + - io.grpc-grpc-rls-1.55.3.jar + - io.grpc-grpc-servlet-1.55.3.jar + - io.grpc-grpc-servlet-jakarta-1.55.3.jar * Perfmark - - io.perfmark-perfmark-api-0.19.0.jar + - io.perfmark-perfmark-api-0.26.0.jar * OpenCensus - io.opencensus-opencensus-api-0.28.0.jar - io.opencensus-opencensus-contrib-http-util-0.28.0.jar @@ -443,9 +445,13 @@ The Apache Software License, Version 2.0 * Jodah - net.jodah-typetools-0.5.0.jar - net.jodah-failsafe-2.4.4.jar + * Byte Buddy + - net.bytebuddy-byte-buddy-1.14.12.jar + * zt-zip + - org.zeroturnaround-zt-zip-1.17.jar * Apache Avro - - org.apache.avro-avro-1.10.2.jar - - org.apache.avro-avro-protobuf-1.10.2.jar + - org.apache.avro-avro-1.11.3.jar + - org.apache.avro-avro-protobuf-1.11.3.jar * Apache Curator - org.apache.curator-curator-client-5.1.0.jar - org.apache.curator-curator-framework-5.1.0.jar @@ -453,9 +459,9 @@ The Apache Software License, Version 2.0 * Apache Yetus - org.apache.yetus-audience-annotations-0.12.0.jar * Kubernetes Client - - io.kubernetes-client-java-12.0.1.jar - - io.kubernetes-client-java-api-12.0.1.jar - - io.kubernetes-client-java-proto-12.0.1.jar + - io.kubernetes-client-java-18.0.0.jar + - io.kubernetes-client-java-api-18.0.0.jar + - io.kubernetes-client-java-proto-18.0.0.jar * Dropwizard - io.dropwizard.metrics-metrics-core-4.1.12.1.jar - io.dropwizard.metrics-metrics-graphite-4.1.12.1.jar @@ -475,28 +481,29 @@ The Apache Software License, Version 2.0 - io.vertx-vertx-core-4.3.8.jar - io.vertx-vertx-web-4.3.8.jar - io.vertx-vertx-web-common-4.3.8.jar + - io.vertx-vertx-grpc-4.3.5.jar * Apache ZooKeeper - - org.apache.zookeeper-zookeeper-3.8.1.jar - - org.apache.zookeeper-zookeeper-jute-3.8.1.jar - - org.apache.zookeeper-zookeeper-prometheus-metrics-3.8.1.jar + - org.apache.zookeeper-zookeeper-3.9.1.jar + - org.apache.zookeeper-zookeeper-jute-3.9.1.jar + - org.apache.zookeeper-zookeeper-prometheus-metrics-3.9.1.jar * Snappy Java - - 
org.xerial.snappy-snappy-java-1.1.8.4.jar + - org.xerial.snappy-snappy-java-1.1.10.5.jar * Google HTTP Client - com.google.http-client-google-http-client-gson-1.41.0.jar - com.google.http-client-google-http-client-1.41.0.jar - com.google.auto.value-auto-value-annotations-1.9.jar - - com.google.re2j-re2j-1.5.jar + - com.google.re2j-re2j-1.6.jar * Jetcd - - io.etcd-jetcd-common-0.5.11.jar - - io.etcd-jetcd-core-0.5.11.jar + - io.etcd-jetcd-api-0.7.5.jar + - io.etcd-jetcd-common-0.7.5.jar + - io.etcd-jetcd-core-0.7.5.jar + - io.etcd-jetcd-grpc-0.7.5.jar * IPAddress - com.github.seancfoley-ipaddress-5.3.3.jar * RxJava - io.reactivex.rxjava3-rxjava-3.0.1.jar - * RabbitMQ Java Client - - com.rabbitmq-amqp-client-5.5.3.jar * RoaringBitmap - - org.roaringbitmap-RoaringBitmap-0.9.15.jar + - org.roaringbitmap-RoaringBitmap-0.9.44.jar BSD 3-clause "New" or "Revised" License * Google auth library @@ -516,7 +523,7 @@ MIT License - org.slf4j-slf4j-api-1.7.32.jar - org.slf4j-jcl-over-slf4j-1.7.32.jar * The Checker Framework - - org.checkerframework-checker-qual-3.12.0.jar + - org.checkerframework-checker-qual-3.33.0.jar * oshi - com.github.oshi-oshi-core-java11-6.4.0.jar * Auth0, Inc. @@ -572,10 +579,10 @@ Creative Commons Attribution License Bouncy Castle License * Bouncy Castle -- ../licenses/LICENSE-bouncycastle.txt - - org.bouncycastle-bcpkix-jdk15on-1.69.jar - - org.bouncycastle-bcprov-ext-jdk15on-1.69.jar - - org.bouncycastle-bcprov-jdk15on-1.69.jar - - org.bouncycastle-bcutil-jdk15on-1.69.jar + - org.bouncycastle-bcpkix-jdk18on-1.75.jar + - org.bouncycastle-bcprov-ext-jdk18on-1.75.jar + - org.bouncycastle-bcprov-jdk18on-1.75.jar + - org.bouncycastle-bcutil-jdk18on-1.75.jar ------------------------ diff --git a/distribution/server/src/assemble/bin.xml b/distribution/server/src/assemble/bin.xml index 41ac24d0582da..f65a8e2fb7260 100644 --- a/distribution/server/src/assemble/bin.xml +++ b/distribution/server/src/assemble/bin.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -131,14 +130,11 @@ ${artifact.groupId}-${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension} - - org.apache.pulsar:pulsar-functions-runtime-all - + io.streamnative:pulsar-functions-runtime-all org.projectlombok:lombok - - org.apache.pulsar:pulsar-functions-api-examples + io.streamnative:pulsar-functions-api-examples *:tar.gz diff --git a/distribution/shell/pom.xml b/distribution/shell/pom.xml index b38baee4257ba..d07a444115cd5 100644 --- a/distribution/shell/pom.xml +++ b/distribution/shell/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.0.0-SNAPSHOT .. 
- pulsar-shell-distribution pom Pulsar :: Distribution :: Shell - ${project.groupId} pulsar-client-tools ${project.version} - org.apache.logging.log4j log4j-core - org.apache.logging.log4j log4j-web @@ -53,14 +48,11 @@ org.apache.logging.log4j log4j-slf4j-impl - io.prometheus simpleclient_log4j2 - - @@ -101,6 +93,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/shell/src/assemble/LICENSE.bin.txt b/distribution/shell/src/assemble/LICENSE.bin.txt index 0ac08caa1c01d..cd230e3b0522c 100644 --- a/distribution/shell/src/assemble/LICENSE.bin.txt +++ b/distribution/shell/src/assemble/LICENSE.bin.txt @@ -311,30 +311,30 @@ This projects includes binary packages with the following licenses: The Apache Software License, Version 2.0 * JCommander -- jcommander-1.82.jar * Jackson - - jackson-annotations-2.13.4.jar - - jackson-core-2.13.4.jar - - jackson-databind-2.13.4.2.jar - - jackson-dataformat-yaml-2.13.4.jar - - jackson-jaxrs-base-2.13.4.jar - - jackson-jaxrs-json-provider-2.13.4.jar - - jackson-module-jaxb-annotations-2.13.4.jar - - jackson-module-jsonSchema-2.13.4.jar - - jackson-datatype-jdk8-2.13.4.jar - - jackson-datatype-jsr310-2.13.4.jar - - jackson-module-parameter-names-2.13.4.jar + - jackson-annotations-2.14.2.jar + - jackson-core-2.14.2.jar + - jackson-databind-2.14.2.jar + - jackson-dataformat-yaml-2.14.2.jar + - jackson-jaxrs-base-2.14.2.jar + - jackson-jaxrs-json-provider-2.14.2.jar + - jackson-module-jaxb-annotations-2.14.2.jar + - jackson-module-jsonSchema-2.14.2.jar + - jackson-datatype-jdk8-2.14.2.jar + - jackson-datatype-jsr310-2.14.2.jar + - jackson-module-parameter-names-2.14.2.jar * Conscrypt -- conscrypt-openjdk-uber-2.5.2.jar * Gson - gson-2.8.9.jar * Guava - - guava-31.0.1-jre.jar + - guava-32.1.1-jre.jar - failureaccess-1.0.1.jar - listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar * J2ObjC Annotations -- j2objc-annotations-1.3.jar * Netty Reactive Streams -- netty-reactive-streams-2.0.6.jar * Swagger - - swagger-annotations-1.6.2.jar - - swagger-core-1.6.2.jar - - swagger-models-1.6.2.jar + - swagger-annotations-1.6.10.jar + - swagger-core-1.6.10.jar + - swagger-models-1.6.10.jar * DataSketches - memory-0.8.3.jar - sketches-core-0.8.3.jar @@ -346,37 +346,37 @@ The Apache Software License, Version 2.0 - commons-logging-1.2.jar - commons-lang3-3.11.jar - commons-text-1.10.0.jar - - commons-compress-1.21.jar + - commons-compress-1.26.0.jar * Netty - - netty-buffer-4.1.89.Final.jar - - netty-codec-4.1.89.Final.jar - - netty-codec-dns-4.1.89.Final.jar - - netty-codec-http-4.1.89.Final.jar - - netty-codec-socks-4.1.89.Final.jar - - netty-codec-haproxy-4.1.89.Final.jar - - netty-common-4.1.89.Final.jar - - netty-handler-4.1.89.Final.jar - - netty-handler-proxy-4.1.89.Final.jar - - netty-resolver-4.1.89.Final.jar - - netty-resolver-dns-4.1.89.Final.jar - - netty-transport-4.1.89.Final.jar - - netty-transport-classes-epoll-4.1.89.Final.jar - - netty-transport-native-epoll-4.1.89.Final-linux-x86_64.jar - - netty-transport-native-unix-common-4.1.89.Final.jar - - netty-transport-native-unix-common-4.1.89.Final-linux-x86_64.jar - - netty-tcnative-boringssl-static-2.0.56.Final.jar - - netty-tcnative-boringssl-static-2.0.56.Final-linux-aarch_64.jar - - netty-tcnative-boringssl-static-2.0.56.Final-linux-x86_64.jar - - netty-tcnative-boringssl-static-2.0.56.Final-osx-aarch_64.jar - - netty-tcnative-boringssl-static-2.0.56.Final-osx-x86_64.jar - 
- netty-tcnative-boringssl-static-2.0.56.Final-windows-x86_64.jar - - netty-tcnative-classes-2.0.56.Final.jar - - netty-incubator-transport-classes-io_uring-0.0.18.Final.jar - - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-aarch_64.jar - - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-x86_64.jar - - netty-resolver-dns-classes-macos-4.1.89.Final.jar - - netty-resolver-dns-native-macos-4.1.89.Final-osx-aarch_64.jar - - netty-resolver-dns-native-macos-4.1.89.Final-osx-x86_64.jar + - netty-buffer-4.1.100.Final.jar + - netty-codec-4.1.100.Final.jar + - netty-codec-dns-4.1.100.Final.jar + - netty-codec-http-4.1.100.Final.jar + - netty-codec-socks-4.1.100.Final.jar + - netty-codec-haproxy-4.1.100.Final.jar + - netty-common-4.1.100.Final.jar + - netty-handler-4.1.100.Final.jar + - netty-handler-proxy-4.1.100.Final.jar + - netty-resolver-4.1.100.Final.jar + - netty-resolver-dns-4.1.100.Final.jar + - netty-transport-4.1.100.Final.jar + - netty-transport-classes-epoll-4.1.100.Final.jar + - netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar + - netty-transport-native-unix-common-4.1.100.Final.jar + - netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar + - netty-tcnative-boringssl-static-2.0.61.Final.jar + - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar + - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar + - netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar + - netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar + - netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar + - netty-tcnative-classes-2.0.61.Final.jar + - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar + - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar + - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar + - netty-resolver-dns-classes-macos-4.1.100.Final.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar * Prometheus client - simpleclient-0.16.0.jar - simpleclient_log4j2-0.16.0.jar @@ -390,29 +390,29 @@ The Apache Software License, Version 2.0 - log4j-web-2.18.0.jar * BookKeeper - - bookkeeper-common-allocator-4.16.0.jar - - cpu-affinity-4.16.0.jar - - circe-checksum-4.16.0.jar + - bookkeeper-common-allocator-4.16.4.jar + - cpu-affinity-4.16.4.jar + - circe-checksum-4.16.4.jar * AirCompressor - aircompressor-0.20.jar * AsyncHttpClient - async-http-client-2.12.1.jar - async-http-client-netty-utils-2.12.1.jar * Jetty - - jetty-client-9.4.48.v20220622.jar - - jetty-http-9.4.48.v20220622.jar - - jetty-io-9.4.48.v20220622.jar - - jetty-util-9.4.48.v20220622.jar - - javax-websocket-client-impl-9.4.48.v20220622.jar - - websocket-api-9.4.48.v20220622.jar - - websocket-client-9.4.48.v20220622.jar - - websocket-common-9.4.48.v20220622.jar - * SnakeYaml -- snakeyaml-1.32.jar + - jetty-client-9.4.53.v20231009.jar + - jetty-http-9.4.53.v20231009.jar + - jetty-io-9.4.53.v20231009.jar + - jetty-util-9.4.53.v20231009.jar + - javax-websocket-client-impl-9.4.53.v20231009.jar + - websocket-api-9.4.53.v20231009.jar + - websocket-client-9.4.53.v20231009.jar + - websocket-common-9.4.53.v20231009.jar + * SnakeYaml -- snakeyaml-2.0.jar * Google Error Prone Annotations - error_prone_annotations-2.5.1.jar * Javassist -- javassist-3.25.0-GA.jar * Apache Avro - - avro-1.10.2.jar - - avro-protobuf-1.10.2.jar + - avro-1.11.3.jar + - avro-protobuf-1.11.3.jar BSD 3-clause "New" or "Revised" License * JSR305 -- 
jsr305-3.0.2.jar -- ../licenses/LICENSE-JSR305.txt @@ -422,7 +422,7 @@ MIT License * SLF4J -- ../licenses/LICENSE-SLF4J.txt - slf4j-api-1.7.32.jar * The Checker Framework - - checker-qual-3.12.0.jar + - checker-qual-3.33.0.jar Protocol Buffers License * Protocol Buffers @@ -470,10 +470,10 @@ Creative Commons Attribution License Bouncy Castle License * Bouncy Castle -- ../licenses/LICENSE-bouncycastle.txt - - bcpkix-jdk15on-1.69.jar - - bcprov-ext-jdk15on-1.69.jar - - bcprov-jdk15on-1.69.jar - - bcutil-jdk15on-1.69.jar + - bcpkix-jdk18on-1.75.jar + - bcprov-ext-jdk18on-1.75.jar + - bcprov-jdk18on-1.75.jar + - bcutil-jdk18on-1.75.jar ------------------------ diff --git a/distribution/shell/src/assemble/shell.xml b/distribution/shell/src/assemble/shell.xml index f823e0258b231..cd11bd8a1ef57 100644 --- a/distribution/shell/src/assemble/shell.xml +++ b/distribution/shell/src/assemble/shell.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -46,7 +45,6 @@ . 644 - bin ${basedir}/../../bin/pulsar-admin-common.sh @@ -76,7 +74,6 @@ ${basedir}/../../conf/log4j2.yaml - lib diff --git a/docker/build.sh b/docker/build.sh index d8ab4bea882c4..88be44f23e73f 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -18,7 +18,7 @@ # under the License. # -ROOT_DIR=$(git rev-parse --show-toplevel) +ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )" cd $ROOT_DIR/docker mvn package -Pdocker,-main diff --git a/docker/get-version.sh b/docker/get-version.sh index 07145e7cf0c18..0b736baf3b270 100755 --- a/docker/get-version.sh +++ b/docker/get-version.sh @@ -18,7 +18,7 @@ # under the License. # -ROOT_DIR=$(git rev-parse --show-toplevel) +ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null 2>&1 && pwd )" pushd $ROOT_DIR > /dev/null diff --git a/docker/pom.xml b/docker/pom.xml index 4d3b05fe33a76..e6e168f65349a 100644 --- a/docker/pom.xml +++ b/docker/pom.xml @@ -1,4 +1,4 @@ - + - + pom 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT @@ -39,6 +38,30 @@ ${skipBuildDistribution} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + @@ -60,6 +83,20 @@ pulsar pulsar-all + + + + pl.project13.maven + git-commit-id-plugin + + false + true + true + false + + + + diff --git a/docker/publish.sh b/docker/publish.sh index af0d72d4b3437..651fefc1498e9 100755 --- a/docker/publish.sh +++ b/docker/publish.sh @@ -18,7 +18,7 @@ # under the License. # -ROOT_DIR=$(git rev-parse --show-toplevel) +ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. 
>/dev/null 2>&1 && pwd )" cd $ROOT_DIR/docker # We should only publish images that are made from official and approved releases @@ -49,6 +49,9 @@ fi MVN_VERSION=`./get-version.sh` echo "Pulsar version: ${MVN_VERSION}" +GIT_COMMIT_ID_ABBREV=$(git rev-parse --short=7 HEAD 2>/dev/null || echo no-git) +GIT_BRANCH=$(git branch --show-current 2>/dev/null || echo no-git) +IMAGE_TAG="${MVN_VERSION}-${GIT_COMMIT_ID_ABBREV}" if [[ -z ${DOCKER_REGISTRY} ]]; then docker_registry_org=${DOCKER_ORG} @@ -62,16 +65,21 @@ set -x # Fail if any of the subsequent commands fail set -e -docker tag pulsar:latest ${docker_registry_org}/pulsar:latest -docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:latest +if [[ "$GIT_BRANCH" == "master" ]]; then + docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:latest + docker tag apachepulsar/pulsar-all:${IMAGE_TAG} ${docker_registry_org}/pulsar-all:latest +fi -docker tag pulsar:latest ${docker_registry_org}/pulsar:$MVN_VERSION -docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:$MVN_VERSION +docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:$MVN_VERSION +docker tag apachepulsar/pulsar-all:${IMAGE_TAG} ${docker_registry_org}/pulsar-all:$MVN_VERSION # Push all images and tags -docker push ${docker_registry_org}/pulsar:latest -docker push ${docker_registry_org}/pulsar-all:latest +if [[ "$GIT_BRANCH" == "master" ]]; then + docker push ${docker_registry_org}/pulsar:latest + docker push ${docker_registry_org}/pulsar-all:latest +fi + docker push ${docker_registry_org}/pulsar:$MVN_VERSION docker push ${docker_registry_org}/pulsar-all:$MVN_VERSION -echo "Finished pushing images to ${docker_registry_org}" +echo "Finished pushing images to ${docker_registry_org}" \ No newline at end of file diff --git a/docker/pulsar-all/Dockerfile b/docker/pulsar-all/Dockerfile index 42431fc94a067..81ad74b65000f 100644 --- a/docker/pulsar-all/Dockerfile +++ b/docker/pulsar-all/Dockerfile @@ -17,6 +17,7 @@ # under the License. 
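# A hedged sketch of the image tagging scheme introduced in docker/publish.sh above (illustrative values,
# not part of the diff): images are tagged <maven-version>-<7-char-git-sha>, the version tag is always
# pushed, and the "latest" tag is only applied and pushed when the current branch is master.
# Assuming MVN_VERSION=3.0.0-SNAPSHOT and HEAD at commit abc1234:
#   IMAGE_TAG="3.0.0-SNAPSHOT-abc1234"
#   docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:3.0.0-SNAPSHOT   # always
#   docker tag apachepulsar/pulsar:${IMAGE_TAG} ${docker_registry_org}/pulsar:latest           # master branch only
#   docker push ${docker_registry_org}/pulsar:3.0.0-SNAPSHOT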
# +ARG PULSAR_IMAGE FROM busybox as pulsar-all ARG PULSAR_IO_DIR @@ -26,6 +27,6 @@ ADD ${PULSAR_IO_DIR} /connectors ADD ${PULSAR_OFFLOADER_TARBALL} / RUN mv /apache-pulsar-offloaders-*/offloaders /offloaders -FROM apachepulsar/pulsar:latest +FROM $PULSAR_IMAGE COPY --from=pulsar-all /connectors /pulsar/connectors COPY --from=pulsar-all /offloaders /pulsar/offloaders diff --git a/docker/pulsar-all/pom.xml b/docker/pulsar-all/pom.xml index c63ff6d656957..b9f01f6189008 100644 --- a/docker/pulsar-all/pom.xml +++ b/docker/pulsar-all/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar + io.streamnative docker-images 3.0.0-SNAPSHOT @@ -29,7 +29,6 @@ pulsar-all-docker-image Apache Pulsar :: Docker Images :: Pulsar Latest Version (Include All Components) pom - ${project.groupId} @@ -64,14 +63,20 @@ - - docker + git-commit-id-no-git + + + ${basedir}/../../.git/index + + - target/apache-pulsar-io-connectors-${project.version}-bin - target/pulsar-offloader-distribution-${project.version}-bin.tar.gz + no-git + + + docker @@ -143,17 +148,26 @@ - pulsar-all + ${docker.organization}/pulsar-all ${project.basedir} latest + ${project.version}-${git.commit.id.abbrev} + + target/apache-pulsar-io-connectors-${project.version}-bin + target/pulsar-offloader-distribution-${project.version}-bin.tar.gz + ${docker.organization}/pulsar:${project.version}-${git.commit.id.abbrev} + + + + ${docker.platforms} + + - latest - ${docker.organization} @@ -161,5 +175,27 @@ + + docker-push + + + + io.fabric8 + docker-maven-plugin + + + default + package + + build + tag + push + + + + + + + diff --git a/docker/pulsar/Dockerfile b/docker/pulsar/Dockerfile index 01e53e0152ac6..a4278ad1a717a 100644 --- a/docker/pulsar/Dockerfile +++ b/docker/pulsar/Dockerfile @@ -36,10 +36,14 @@ COPY scripts/install-pulsar-client.sh /pulsar/bin # The final image needs to give the root group sufficient permission for Pulsar components # to write to specific directories within /pulsar +# The ownership is changed to uid 10000 to allow using a different root group. This is necessary when running the +# container when gid=0 is prohibited. In that case, the container must be run with uid 10000 with +# any group id != 0 (for example 10001). # The file permissions are preserved when copying files from this builder image to the target image. 
RUN for SUBDIRECTORY in conf data download logs; do \ [ -d /pulsar/$SUBDIRECTORY ] || mkdir /pulsar/$SUBDIRECTORY; \ - chmod -R g+w /pulsar/$SUBDIRECTORY; \ + chmod -R ug+w /pulsar/$SUBDIRECTORY; \ + chown -R 10000:0 /pulsar/$SUBDIRECTORY; \ done # Trino writes logs to this directory (at least during tests), so we need to give the process permission @@ -49,11 +53,12 @@ RUN chmod g+w /pulsar/trino ### Create 2nd stage from Ubuntu image ### and add OpenJDK and Python dependencies (for Pulsar functions) -FROM ubuntu:20.04 +FROM ubuntu:22.04 ARG DEBIAN_FRONTEND=noninteractive ARG UBUNTU_MIRROR=mirror://mirrors.ubuntu.com/mirrors.txt ARG UBUNTU_SECURITY_MIRROR=http://security.ubuntu.com/ubuntu/ +ARG DEFAULT_USERNAME=pulsar # Install some utilities RUN sed -i -e "s|http://archive\.ubuntu\.com/ubuntu/|${UBUNTU_MIRROR:-mirror://mirrors.ubuntu.com/mirrors.txt}|g" \ @@ -61,9 +66,9 @@ RUN sed -i -e "s|http://archive\.ubuntu\.com/ubuntu/|${UBUNTU_MIRROR:-mirror://m && echo 'Acquire::http::Timeout "30";\nAcquire::ftp::Timeout "30";\nAcquire::Retries "3";' > /etc/apt/apt.conf.d/99timeout_and_retries \ && apt-get update \ && apt-get -y dist-upgrade \ - && apt-get -y install --no-install-recommends netcat dnsutils less procps iputils-ping \ - python3 python3-kazoo python3-pip \ - curl ca-certificates wget apt-transport-https + && apt-get -y install netcat dnsutils less procps iputils-ping \ + curl ca-certificates wget apt-transport-https \ + && apt-get -y install --no-install-recommends python3 python3-kazoo python3-pip # Install Eclipse Temurin Package RUN mkdir -p /etc/apt/keyrings \ @@ -74,6 +79,7 @@ RUN mkdir -p /etc/apt/keyrings \ && apt-get -y install temurin-17-jdk \ && export ARCH=$(uname -m | sed -r 's/aarch64/arm64/g' | awk '!/arm64/{$0="amd64"}1') \ && echo networkaddress.cache.ttl=1 >> /usr/lib/jvm/temurin-17-jdk-$ARCH/conf/security/java.security \ + && echo networkaddress.cache.negative.ttl=1 >> /usr/lib/jvm/temurin-17-jdk-$ARCH/conf/security/java.security \ # Cleanup apt RUN apt-get -y --purge autoremove \ @@ -81,7 +87,7 @@ RUN apt-get -y --purge autoremove \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -RUN pip3 install pyyaml==5.4.1 +RUN pip3 install pyyaml==6.0.1 # Pulsar currently writes to the below directories, assuming the default configuration. # Note that number 4 is the reason that pulsar components need write access to the /pulsar directory. @@ -104,4 +110,5 @@ RUN chmod +x /pulsar/bin/install-pulsar-client.sh RUN /pulsar/bin/install-pulsar-client.sh # The UID must be non-zero. Otherwise, it is arbitrary. No logic should rely on its specific value. 
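# A hedged usage note for the permission changes above (not part of this diff; the image name is a
# placeholder): because the /pulsar subdirectories are now owned by uid 10000 with group-0 write access,
# environments that prohibit gid=0 can run the image with uid 10000 and any non-zero group id
# (for example 10001, as the comment above notes), e.g.:
#   docker run --user 10000:10001 <pulsar-image> bin/pulsar standalone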
+RUN useradd ${DEFAULT_USERNAME} -u 10000 -g 0 USER 10000 diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml index 647f68bf1672c..246b9a07e309c 100644 --- a/docker/pulsar/pom.xml +++ b/docker/pulsar/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar + io.streamnative docker-images 3.0.0-SNAPSHOT @@ -29,7 +29,6 @@ pulsar-docker-image Apache Pulsar :: Docker Images :: Pulsar Latest Version pom - ${project.groupId} @@ -46,16 +45,24 @@ - + + mirror://mirrors.ubuntu.com/mirrors.txt + http://security.ubuntu.com/ubuntu/ + - docker + git-commit-id-no-git + + + ${basedir}/../../.git/index + + - target/pulsar-server-distribution-${project.version}-bin.tar.gz - ${pulsar.client.python.version} - ${env.UBUNTU_MIRROR} - ${env.UBUNTU_SECURITY_MIRROR} + no-git + + + docker @@ -72,17 +79,27 @@ - pulsar + ${docker.organization}/pulsar + + target/pulsar-server-distribution-${project.version}-bin.tar.gz + ${pulsar.client.python.version} + ${UBUNTU_MIRROR} + ${UBUNTU_SECURITY_MIRROR} + ${project.basedir} latest + ${project.version}-${git.commit.id.abbrev} + + + ${docker.platforms} + + - latest - ${docker.organization} @@ -108,5 +125,27 @@ + + docker-push + + + + io.fabric8 + docker-maven-plugin + + + default + package + + build + tag + push + + + + + + + diff --git a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py index 58f6c98975005..9943b283a9f89 100755 --- a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py +++ b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -32,83 +32,8 @@ # update if they exist and ignored if they don't. ############################################################ -import os -import sys - -if len(sys.argv) < 3: - print('Usage: %s [...]' % (sys.argv[0])) - sys.exit(1) - -# Always apply env config to env scripts as well -prefix = sys.argv[1] -conf_files = sys.argv[2:] - -PF_ENV_DEBUG = (os.environ.get('PF_ENV_DEBUG','0') == '1') - -for conf_filename in conf_files: - lines = [] # List of config file lines - keys = {} # Map a key to its line number in the file - - # Load conf file - for line in open(conf_filename): - lines.append(line) - line = line.strip() - if not line or line.startswith('#'): - continue - - try: - k,v = line.split('=', 1) - keys[k] = len(lines) - 1 - except: - if PF_ENV_DEBUG: - print("[%s] skip Processing %s" % (conf_filename, line)) - - # Update values from Env - for k in sorted(os.environ.keys()): - v = os.environ[k].strip() - - # Hide the value in logs if is password. - if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - if k.startswith(prefix): - k = k[len(prefix):] - if k in keys: - print('[%s] Applying config %s = %s' % (conf_filename, k, displayValue)) - idx = keys[k] - lines[idx] = '%s=%s\n' % (k, v) - - - # Ensure we have a new-line at the end of the file, to avoid issue - # when appending more lines to the config - lines.append('\n') - - # Add new keys from Env - for k in sorted(os.environ.keys()): - v = os.environ[k] - if not k.startswith(prefix): - continue - - # Hide the value in logs if is password. 
- if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - k = k[len(prefix):] - if k not in keys: - print('[%s] Adding config %s = %s' % (conf_filename, k, displayValue)) - lines.append('%s=%s\n' % (k, v)) - else: - print('[%s] Updating config %s = %s' % (conf_filename, k, displayValue)) - lines[keys[k]] = '%s=%s\n' % (k, v) - - - # Store back the updated config in the same file - f = open(conf_filename, 'w') - for line in lines: - f.write(line) - f.close() +# DEPRECATED: Use "apply-config-from-env.py --prefix MY_PREFIX_ conf_file" instead +# this is not a python script, but a bash script. Call apply-config-from-env.py with the prefix argument +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +"${SCRIPT_DIR}/apply-config-from-env.py" --prefix "$1" "${@:2}" diff --git a/docker/pulsar/scripts/apply-config-from-env.py b/docker/pulsar/scripts/apply-config-from-env.py index b8b479fc15b85..da51f05f8be66 100755 --- a/docker/pulsar/scripts/apply-config-from-env.py +++ b/docker/pulsar/scripts/apply-config-from-env.py @@ -25,18 +25,29 @@ ## ./apply-config-from-env file.conf ## -import os, sys - -if len(sys.argv) < 2: - print('Usage: %s' % (sys.argv[0])) +import os, sys, argparse + +parser = argparse.ArgumentParser(description='Pulsar configuration file customizer based on environment variables') +parser.add_argument('--prefix', default='PULSAR_PREFIX_', help='Prefix for environment variables, default is PULSAR_PREFIX_') +parser.add_argument('conf_files', nargs='*', help='Configuration files') +args = parser.parse_args() +if not args.conf_files: + parser.print_help() sys.exit(1) -# Always apply env config to env scripts as well -conf_files = sys.argv[1:] +env_prefix = args.prefix +conf_files = args.conf_files -PF_ENV_PREFIX = 'PULSAR_PREFIX_' PF_ENV_DEBUG = (os.environ.get('PF_ENV_DEBUG','0') == '1') +# List of keys where the value should not be displayed in logs +sensitive_keys = ["brokerClientAuthenticationParameters", "bookkeeperClientAuthenticationParameters", "tokenSecretKey"] + +def sanitize_display_value(k, v): + if "password" in k.lower() or k in sensitive_keys or (k == "tokenSecretKey" and v.startswith("data:")): + return "********" + return v + for conf_filename in conf_files: lines = [] # List of config file lines keys = {} # Map a key to its line number in the file @@ -47,7 +58,6 @@ line = line.strip() if not line: continue - try: k,v = line.split('=', 1) if k.startswith('#'): @@ -61,37 +71,26 @@ for k in sorted(os.environ.keys()): v = os.environ[k].strip() - # Hide the value in logs if is password. - if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - if k.startswith(PF_ENV_PREFIX): - k = k[len(PF_ENV_PREFIX):] if k in keys: + displayValue = sanitize_display_value(k, v) print('[%s] Applying config %s = %s' % (conf_filename, k, displayValue)) idx = keys[k] lines[idx] = '%s=%s\n' % (k, v) - # Ensure we have a new-line at the end of the file, to avoid issue # when appending more lines to the config lines.append('\n') - - # Add new keys from Env + + # Add new keys from Env for k in sorted(os.environ.keys()): - v = os.environ[k] - if not k.startswith(PF_ENV_PREFIX): + if not k.startswith(env_prefix): continue - # Hide the value in logs if is password. 
- if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v + v = os.environ[k].strip() + k = k[len(env_prefix):] + + displayValue = sanitize_display_value(k, v) - k = k[len(PF_ENV_PREFIX):] if k not in keys: print('[%s] Adding config %s = %s' % (conf_filename, k, displayValue)) lines.append('%s=%s\n' % (k, v)) @@ -99,10 +98,8 @@ print('[%s] Updating config %s = %s' % (conf_filename, k, displayValue)) lines[keys[k]] = '%s=%s\n' % (k, v) - # Store back the updated config in the same file f = open(conf_filename, 'w') for line in lines: f.write(line) - f.close() - + f.close() \ No newline at end of file diff --git a/docker/pulsar/scripts/gen-yml-from-env.py b/docker/pulsar/scripts/gen-yml-from-env.py index ce19436b7e0dd..aa40408ed5b1f 100755 --- a/docker/pulsar/scripts/gen-yml-from-env.py +++ b/docker/pulsar/scripts/gen-yml-from-env.py @@ -61,7 +61,7 @@ conf_files = sys.argv[1:] for conf_filename in conf_files: - conf = yaml.load(open(conf_filename)) + conf = yaml.load(open(conf_filename), Loader=yaml.FullLoader) # update the config modified = False diff --git a/jclouds-shaded/pom.xml b/jclouds-shaded/pom.xml index d4138ea041317..3b028bb8afc73 100644 --- a/jclouds-shaded/pom.xml +++ b/jclouds-shaded/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - jclouds-shaded Apache Pulsar :: Jclouds shaded - - org.apache.jclouds jclouds-allblobstore @@ -66,7 +61,6 @@ javax.annotation-api - @@ -82,7 +76,6 @@ true true false - com.google.guava:guava @@ -106,7 +99,6 @@ com.google.errorprone:* - com.google @@ -140,21 +132,20 @@ com.google.errorprone org.apache.pulsar.jcloud.shade.com.google.errorprone - - - org.apache.jclouds:jclouds-core - - - lib/gson*jar - - - + + org.apache.jclouds:jclouds-core + + + lib/gson*jar + + + diff --git a/managed-ledger/pom.xml b/managed-ledger/pom.xml index 2a7b9c576d318..fe1cf0cb3a2ae 100644 --- a/managed-ledger/pom.xml +++ b/managed-ledger/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- managed-ledger Managed Ledger - org.apache.bookkeeper bookkeeper-server - org.apache.bookkeeper.stats prometheus-metrics-provider - org.apache.bookkeeper.stats codahale-metrics-provider ${bookkeeper.version} + + + amqp-client + com.rabbitmq + + - com.google.protobuf protobuf-java - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-metadata ${project.version} - com.google.guava guava - ${project.groupId} testmocks ${project.version} test - org.apache.zookeeper zookeeper @@ -99,29 +94,25 @@ - io.dropwizard.metrics - metrics-core - test + io.dropwizard.metrics + metrics-core + test - org.xerial.snappy - snappy-java - test + org.xerial.snappy + snappy-java + test - org.awaitility awaitility test - org.slf4j slf4j-api - - @@ -141,7 +132,6 @@ - org.apache.maven.plugins maven-jar-plugin diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java index 7802ed07781ba..d1ffdf6d2d763 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java @@ -637,6 +637,23 @@ Position findNewestMatching(FindPositionConstraint constraint, Predicate void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, FindEntryCallback callback, Object ctx); + /** + * Find the newest entry that matches the given predicate. + * + * @param constraint + * search only active entries or all entries + * @param condition + * predicate that reads an entry and applies a condition + * @param callback + * callback object returning the resultant position + * @param ctx + * opaque context + * @param isFindFromLedger + * find the newest entry from ledger + */ + void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, + FindEntryCallback callback, Object ctx, boolean isFindFromLedger); + /** * reset the cursor to specified position to enable replay of messages. * @@ -786,6 +803,12 @@ Set asyncReplayEntries( */ long getEstimatedSizeSinceMarkDeletePosition(); + /** + * If a ledger is lost, this ledger will be skipped after "autoSkipNonRecoverableData" is enabled, and the method is + * used to delete information about this ledger in the ManagedCursor. + */ + default void skipNonRecoverableLedger(long ledgerId){} + /** * Returns cursor throttle mark-delete rate. * diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java index 4ca56508891a1..f91d9ec3f5a02 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java @@ -631,6 +631,12 @@ void asyncSetProperties(Map properties, AsyncCallbacks.UpdatePro */ void trimConsumedLedgersInBackground(CompletableFuture promise); + /** + * If a ledger is lost, this ledger will be skipped after "autoSkipNonRecoverableData" is enabled, and the method is + * used to delete information about this ledger in the ManagedCursor. + */ + default void skipNonRecoverableLedger(long ledgerId){} + /** * Roll current ledger if it is full. */ @@ -676,8 +682,10 @@ void asyncSetProperties(Map properties, AsyncCallbacks.UpdatePro /** * Check current inactive ledger (based on {@link ManagedLedgerConfig#getInactiveLedgerRollOverTimeMs()} and * roll over that ledger if inactive.
+ * + * @return true if ledger is considered for rolling over */ - void checkInactiveLedgerAndRollOver(); + boolean checkInactiveLedgerAndRollOver(); /** * Check if managed ledger should cache backlog reads. diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java index 0c93a5b642cf6..6ee9c2f949243 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java @@ -170,6 +170,7 @@ public int getMinimumRolloverTimeMs() { * the time unit */ public void setMinimumRolloverTime(int minimumRolloverTime, TimeUnit unit) { + checkArgument(minimumRolloverTime >= 0); this.minimumRolloverTimeMs = (int) unit.toMillis(minimumRolloverTime); checkArgument(maximumRolloverTimeMs >= minimumRolloverTimeMs, "Minimum rollover time needs to be less than maximum rollover time"); @@ -195,6 +196,7 @@ public long getMaximumRolloverTimeMs() { * the time unit */ public void setMaximumRolloverTime(int maximumRolloverTime, TimeUnit unit) { + checkArgument(maximumRolloverTime >= 0); this.maximumRolloverTimeMs = unit.toMillis(maximumRolloverTime); checkArgument(maximumRolloverTimeMs >= minimumRolloverTimeMs, "Maximum rollover time needs to be greater than minimum rollover time"); @@ -411,7 +413,8 @@ public ManagedLedgerConfig setThrottleMarkDelete(double throttleMarkDelete) { * time unit for retention time */ public ManagedLedgerConfig setRetentionTime(int retentionTime, TimeUnit unit) { - this.retentionTimeMs = unit.toMillis(retentionTime); + checkArgument(retentionTime >= -1, "The retention time should be -1, 0 or value > 0"); + this.retentionTimeMs = retentionTime != -1 ? unit.toMillis(retentionTime) : -1; return this; } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java index b1427bab80b22..e09fd84ea55f2 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java @@ -90,7 +90,7 @@ ManagedLedger open(String name, ManagedLedgerConfig config) * opaque context */ void asyncOpen(String name, ManagedLedgerConfig config, OpenLedgerCallback callback, - Supplier mlOwnershipChecker, Object ctx); + Supplier> mlOwnershipChecker, Object ctx); /** * Open a {@link ReadOnlyCursor} positioned to the earliest entry for the specified managed ledger. 
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java index ef607fa7ed7cf..10e72f709feed 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java @@ -59,6 +59,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.LongStream; import org.apache.bookkeeper.client.AsyncCallback.CloseCallback; import org.apache.bookkeeper.client.AsyncCallback.OpenCallback; import org.apache.bookkeeper.client.BKException; @@ -195,11 +196,11 @@ public class ManagedCursorImpl implements ManagedCursor { position.ackSet = null; return position; }; - private final RangeSetWrapper individualDeletedMessages; + protected final RangeSetWrapper individualDeletedMessages; // Maintain the deletion status for batch messages // (ledgerId, entryId) -> deletion indexes - private final ConcurrentSkipListMap batchDeletedIndexes; + protected final ConcurrentSkipListMap batchDeletedIndexes; private final ReadWriteLock lock = new ReentrantReadWriteLock(); private RateLimiter markDeleteLimiter; @@ -509,7 +510,7 @@ public void operationComplete(ManagedCursorInfo info, Stat stat) { callback.operationComplete(); } else { // Need to proceed and read the last entry in the specified ledger to find out the last position - log.info("[{}] Consumer {} meta-data recover from ledger {}", ledger.getName(), name, + log.info("[{}] Cursor {} meta-data recover from ledger {}", ledger.getName(), name, info.getCursorsLedgerId()); recoverFromLedger(info, callback); } @@ -529,16 +530,16 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac long ledgerId = info.getCursorsLedgerId(); OpenCallback openCallback = (rc, lh, ctx) -> { if (log.isInfoEnabled()) { - log.info("[{}] Opened ledger {} for consumer {}. rc={}", ledger.getName(), ledgerId, name, rc); + log.info("[{}] Opened ledger {} for cursor {}. 
rc={}", ledger.getName(), ledgerId, name, rc); } if (isBkErrorNotRecoverable(rc)) { - log.error("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name, + log.error("[{}] Error opening metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc)); // Rewind to oldest entry available initialize(getRollbackPosition(info), Collections.emptyMap(), Collections.emptyMap(), callback); return; } else if (rc != BKException.Code.OK) { - log.warn("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name, + log.warn("[{}] Error opening metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc)); callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc))); return; @@ -548,7 +549,7 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac long lastEntryInLedger = lh.getLastAddConfirmed(); if (lastEntryInLedger < 0) { - log.warn("[{}] Error reading from metadata ledger {} for consumer {}: No entries in ledger", + log.warn("[{}] Error reading from metadata ledger {} for cursor {}: No entries in ledger", ledger.getName(), ledgerId, name); // Rewind to last cursor snapshot available initialize(getRollbackPosition(info), Collections.emptyMap(), cursorProperties, callback); @@ -560,13 +561,13 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac log.debug("[{}} readComplete rc={} entryId={}", ledger.getName(), rc1, lh1.getLastAddConfirmed()); } if (isBkErrorNotRecoverable(rc1)) { - log.error("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(), + log.error("[{}] Error reading from metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc1)); // Rewind to oldest entry available initialize(getRollbackPosition(info), Collections.emptyMap(), cursorProperties, callback); return; } else if (rc1 != BKException.Code.OK) { - log.warn("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(), + log.warn("[{}] Error reading from metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc1)); callback.operationFailed(createManagedLedgerException(rc1)); @@ -690,7 +691,7 @@ private void recoveredCursor(PositionImpl position, Map properties position = ledger.getLastPosition(); } log.info("[{}] Cursor {} recovered to position {}", ledger.getName(), name, position); - this.cursorProperties = cursorProperties; + this.cursorProperties = cursorProperties == null ? Collections.emptyMap() : cursorProperties; messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition())); markDeletePosition = position; persistentMarkDeletePosition = position; @@ -786,6 +787,8 @@ public void asyncReadEntriesWithSkip(int numberOfEntriesToRead, long maxSizeByte int numOfEntriesToRead = applyMaxSizeCap(numberOfEntriesToRead, maxSizeBytes); PENDING_READ_OPS_UPDATER.incrementAndGet(this); + // Skip deleted entries. + skipCondition = skipCondition == null ? 
this::isMessageDeleted : skipCondition.or(this::isMessageDeleted); OpReadEntry op = OpReadEntry.create(this, readPosition, numOfEntriesToRead, callback, ctx, maxPosition, skipCondition); ledger.asyncReadEntries(op); @@ -816,6 +819,11 @@ public void readEntryComplete(Entry entry, Object ctx) { result.entry = entry; counter.countDown(); } + + @Override + public String toString() { + return String.format("Cursor [%s] get Nth entry", ManagedCursorImpl.this); + } }, null); counter.await(ledger.getConfig().getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS); @@ -939,6 +947,8 @@ public void asyncReadEntriesWithSkipOrWait(int maxEntries, long maxSizeBytes, Re asyncReadEntriesWithSkip(numberOfEntriesToRead, NO_MAX_SIZE_LIMIT, callback, ctx, maxPosition, skipCondition); } else { + // Skip deleted entries. + skipCondition = skipCondition == null ? this::isMessageDeleted : skipCondition.or(this::isMessageDeleted); OpReadEntry op = OpReadEntry.create(this, readPosition, numberOfEntriesToRead, callback, ctx, maxPosition, skipCondition); @@ -1102,6 +1112,9 @@ public long getNumberOfEntriesInBacklog(boolean isPrecise) { messagesConsumedCounter, markDeletePosition, readPosition); } if (isPrecise) { + if (markDeletePosition.compareTo(ledger.getLastPosition()) >= 0) { + return 0; + } return getNumberOfEntries(Range.openClosed(markDeletePosition, ledger.getLastPosition())); } @@ -1182,6 +1195,12 @@ public void findEntryFailed(ManagedLedgerException exception, Optional @Override public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, FindEntryCallback callback, Object ctx) { + asyncFindNewestMatching(constraint, condition, callback, ctx, false); + } + + @Override + public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, + FindEntryCallback callback, Object ctx, boolean isFindFromLedger) { OpFindNewest op; PositionImpl startPosition = null; long max = 0; @@ -1203,7 +1222,11 @@ public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate Optional.empty(), ctx); return; } - op = new OpFindNewest(this, startPosition, condition, max, callback, ctx); + if (isFindFromLedger) { + op = new OpFindNewest(this.ledger, startPosition, condition, max, callback, ctx); + } else { + op = new OpFindNewest(this, startPosition, condition, max, callback, ctx); + } op.find(); } @@ -1251,7 +1274,8 @@ protected void internalResetCursor(PositionImpl proposedReadPosition, newReadPosition = proposedReadPosition; } - log.info("[{}] Initiate reset readPosition to {} on cursor {}", ledger.getName(), newReadPosition, name); + log.info("[{}] Initiate reset readPosition from {} to {} on cursor {}", ledger.getName(), readPosition, + newReadPosition, name); synchronized (pendingMarkDeleteOps) { if (!RESET_CURSOR_IN_PROGRESS_UPDATER.compareAndSet(this, FALSE, TRUE)) { @@ -1518,6 +1542,11 @@ public synchronized void readEntryFailed(ManagedLedgerException mle, Object ctx) callback.readEntriesFailed(exception.get(), ctx); } } + + @Override + public String toString() { + return String.format("Cursor [%s] async replay entries", ManagedCursorImpl.this); + } }; positions.stream().filter(position -> !alreadyAcknowledgedPositions.contains(position)) @@ -1779,7 +1808,6 @@ long getNumIndividualDeletedEntriesToSkip(long numEntries) { } finally { if (r.lowerEndpoint() instanceof PositionImplRecyclable) { ((PositionImplRecyclable) r.lowerEndpoint()).recycle(); - ((PositionImplRecyclable) r.upperEndpoint()).recycle(); } } }, recyclePositionRangeConverter); @@ 
-2091,7 +2119,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) { } }); - persistPositionToLedger(cursorLedger, mdEntry, new VoidCallback() { + VoidCallback cb = new VoidCallback() { @Override public void operationComplete() { if (log.isDebugEnabled()) { @@ -2143,7 +2171,18 @@ public void operationFailed(ManagedLedgerException exception) { mdEntry.triggerFailed(exception); } - }); + }; + + if (State.NoLedger.equals(STATE_UPDATER.get(this))) { + if (ledger.isNoMessagesAfterPos(mdEntry.newPosition)) { + persistPositionToMetaStore(mdEntry, cb); + } else { + mdEntry.callback.markDeleteFailed(new ManagedLedgerException("Create new cursor ledger failed"), + mdEntry.ctx); + } + } else { + persistPositionToLedger(cursorLedger, mdEntry, cb); + } } @Override @@ -2434,8 +2473,12 @@ List filterReadEntries(List entries) { @Override public synchronized String toString() { - return MoreObjects.toStringHelper(this).add("ledger", ledger.getName()).add("name", name) - .add("ackPos", markDeletePosition).add("readPos", readPosition).toString(); + return MoreObjects.toStringHelper(this) + .add("ledger", ledger.getName()) + .add("name", name) + .add("ackPos", markDeletePosition) + .add("readPos", readPosition) + .toString(); } @Override @@ -2648,32 +2691,47 @@ public void operationComplete(Void result, Stat stat) { } @Override - public void operationFailed(MetaStoreException e) { - if (e instanceof MetaStoreException.BadVersionException) { + public void operationFailed(MetaStoreException topLevelException) { + if (topLevelException instanceof MetaStoreException.BadVersionException) { log.warn("[{}] Failed to update cursor metadata for {} due to version conflict {}", - ledger.name, name, e.getMessage()); + ledger.name, name, topLevelException.getMessage()); // it means previous owner of the ml might have updated the version incorrectly. So, check // the ownership and refresh the version again. 
- if (ledger.mlOwnershipChecker != null && ledger.mlOwnershipChecker.get()) { - ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, - new MetaStoreCallback() { - @Override - public void operationComplete(ManagedCursorInfo info, Stat stat) { - updateCursorLedgerStat(info, stat); - } - - @Override - public void operationFailed(MetaStoreException e) { - if (log.isDebugEnabled()) { - log.debug( - "[{}] Failed to refresh cursor metadata-version for {} due " - + "to {}", ledger.name, name, e.getMessage()); - } - } - }); + if (ledger.mlOwnershipChecker != null) { + ledger.mlOwnershipChecker.get().whenComplete((hasOwnership, t) -> { + if (t == null && hasOwnership) { + ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, + new MetaStoreCallback<>() { + @Override + public void operationComplete(ManagedCursorInfo info, Stat stat) { + updateCursorLedgerStat(info, stat); + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + + @Override + public void operationFailed(MetaStoreException e) { + if (log.isDebugEnabled()) { + log.debug( + "[{}] Failed to refresh cursor metadata-version " + + "for {} due to {}", ledger.name, name, + e.getMessage()); + } + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + }); + } else { + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + }); + } else { + callback.operationFailed(topLevelException); } + } else { + callback.operationFailed(topLevelException); } - callback.operationFailed(e); } }); } @@ -2718,6 +2776,39 @@ void setReadPosition(Position newReadPositionInt) { } } + /** + * Manually acknowledge all entries in the lost ledger. + * - Since this is an uncommon event, we focus on maintainability. So we do not modify + * {@link #individualDeletedMessages} and {@link #batchDeletedIndexes}, but call + * {@link #asyncDelete(Position, AsyncCallbacks.DeleteCallback, Object)}. + * - This method is valid regardless of the consumer ACK type. + * - If there is a consumer ack request after this event, it will also work. + */ + @Override + public void skipNonRecoverableLedger(final long ledgerId){ + LedgerInfo ledgerInfo = ledger.getLedgersInfo().get(ledgerId); + if (ledgerInfo == null) { + return; + } + log.warn("[{}] [{}] Since the ledger [{}] is lost and the autoSkipNonRecoverableData is true, this ledger will" + + " be auto acknowledge in subscription", ledger.getName(), name, ledgerId); + asyncDelete(() -> LongStream.range(0, ledgerInfo.getEntries()) + .mapToObj(i -> (Position) PositionImpl.get(ledgerId, i)).iterator(), + new AsyncCallbacks.DeleteCallback() { + @Override + public void deleteComplete(Object ctx) { + // ignore. + } + + @Override + public void deleteFailed(ManagedLedgerException ex, Object ctx) { + // The method internalMarkDelete already handled the failure operation. We only need to + // make sure the memory state is updated. + // If the broker crashed, the non-recoverable ledger will be detected again. 
+ } + }, null); + } + // ////////////////////////////////////////////////// void startCreatingNewMetadataLedger() { @@ -2749,16 +2840,15 @@ public void operationComplete() { @Override public void operationFailed(ManagedLedgerException exception) { - log.error("[{}][{}] Metadata ledger creation failed", ledger.getName(), name, exception); + log.error("[{}][{}] Metadata ledger creation failed {}, try to persist the position in the metadata" + + " store.", ledger.getName(), name, exception); synchronized (pendingMarkDeleteOps) { - while (!pendingMarkDeleteOps.isEmpty()) { - MarkDeleteEntry entry = pendingMarkDeleteOps.poll(); - entry.callback.markDeleteFailed(exception, entry.ctx); - } - // At this point we don't have a ledger ready STATE_UPDATER.set(ManagedCursorImpl.this, State.NoLedger); + // Note: if the stat is NoLedger, will persist the mark deleted position to metadata store. + // Before giving up, try to persist the position in the metadata store. + flushPendingMarkDeletes(); } } }); @@ -3010,7 +3100,7 @@ void persistPositionToLedger(final LedgerHandle lh, MarkDeleteEntry mdEntry, fin if (shouldCloseLedger(lh1)) { if (log.isDebugEnabled()) { - log.debug("[{}] Need to create new metadata ledger for consumer {}", ledger.getName(), name); + log.debug("[{}] Need to create new metadata ledger for cursor {}", ledger.getName(), name); } startCreatingNewMetadataLedger(); } @@ -3025,32 +3115,39 @@ void persistPositionToLedger(final LedgerHandle lh, MarkDeleteEntry mdEntry, fin // in the meantime the mark-delete will be queued. STATE_UPDATER.compareAndSet(ManagedCursorImpl.this, State.Open, State.NoLedger); - mbean.persistToLedger(false); - // Before giving up, try to persist the position in the metadata store - persistPositionMetaStore(-1, position, mdEntry.properties, new MetaStoreCallback() { - @Override - public void operationComplete(Void result, Stat stat) { - if (log.isDebugEnabled()) { - log.debug( - "[{}][{}] Updated cursor in meta store after previous failure in ledger at position" - + " {}", ledger.getName(), name, position); - } - mbean.persistToZookeeper(true); - callback.operationComplete(); - } - - @Override - public void operationFailed(MetaStoreException e) { - log.warn("[{}][{}] Failed to update cursor in meta store after previous failure in ledger: {}", - ledger.getName(), name, e.getMessage()); - mbean.persistToZookeeper(false); - callback.operationFailed(createManagedLedgerException(rc)); - } - }, true); + // Before giving up, try to persist the position in the metadata store. 
+ persistPositionToMetaStore(mdEntry, callback); } }, null); } + void persistPositionToMetaStore(MarkDeleteEntry mdEntry, final VoidCallback callback) { + final PositionImpl newPosition = mdEntry.newPosition; + STATE_UPDATER.compareAndSet(ManagedCursorImpl.this, State.Open, State.NoLedger); + mbean.persistToLedger(false); + // Before giving up, try to persist the position in the metadata store + persistPositionMetaStore(-1, newPosition, mdEntry.properties, new MetaStoreCallback() { + @Override + public void operationComplete(Void result, Stat stat) { + if (log.isDebugEnabled()) { + log.debug( + "[{}][{}] Updated cursor in meta store after previous failure in ledger at position" + + " {}", ledger.getName(), name, newPosition); + } + mbean.persistToZookeeper(true); + callback.operationComplete(); + } + + @Override + public void operationFailed(MetaStoreException e) { + log.warn("[{}][{}] Failed to update cursor in meta store after previous failure in ledger: {}", + ledger.getName(), name, e.getMessage()); + mbean.persistToZookeeper(false); + callback.operationFailed(createManagedLedgerException(e)); + } + }, true); + } + boolean shouldCloseLedger(LedgerHandle lh) { long now = clock.millis(); if (ledger.getFactory().isMetadataServiceAvailable() @@ -3088,7 +3185,7 @@ public void operationComplete(Void result, Stat stat) { @Override public void operationFailed(MetaStoreException e) { - log.warn("[{}] Failed to update consumer {}", ledger.getName(), name, e); + log.warn("[{}] Failed to update cursor metadata {}", ledger.getName(), name, e); // it means it failed to switch the newly created ledger so, it should be // deleted to prevent leak deleteLedgerAsync(lh).thenRun(() -> callback.operationFailed(e)); @@ -3426,7 +3523,7 @@ public ManagedCursorMXBean getStats() { return this.mbean; } - void updateReadStats(int readEntriesCount, long readEntriesSize) { + public void updateReadStats(int readEntriesCount, long readEntriesSize) { this.entriesReadCount += readEntriesCount; this.entriesReadSize += readEntriesSize; } @@ -3458,7 +3555,7 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { }, null); } - private int applyMaxSizeCap(int maxEntries, long maxSizeBytes) { + public int applyMaxSizeCap(int maxEntries, long maxSizeBytes) { if (maxSizeBytes == NO_MAX_SIZE_LIMIT) { return maxEntries; } @@ -3516,4 +3613,29 @@ public boolean isCacheReadEntry() { public ManagedLedgerConfig getConfig() { return config; } + + /*** + * Create a non-durable cursor and copy the ack stats. 
+ */ + public ManagedCursor duplicateNonDurableCursor(String nonDurableCursorName) throws ManagedLedgerException { + NonDurableCursorImpl newNonDurableCursor = + (NonDurableCursorImpl) ledger.newNonDurableCursor(getMarkDeletedPosition(), nonDurableCursorName); + if (individualDeletedMessages != null) { + this.individualDeletedMessages.forEach(range -> { + newNonDurableCursor.individualDeletedMessages.addOpenClosed( + range.lowerEndpoint().getLedgerId(), + range.lowerEndpoint().getEntryId(), + range.upperEndpoint().getLedgerId(), + range.upperEndpoint().getEntryId()); + return true; + }); + } + if (batchDeletedIndexes != null) { + for (Map.Entry entry : this.batchDeletedIndexes.entrySet()) { + BitSetRecyclable copiedBitSet = BitSetRecyclable.valueOf(entry.getValue()); + newNonDurableCursor.batchDeletedIndexes.put(entry.getKey(), copiedBitSet); + } + } + return newNonDurableCursor; + } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index 9f3fe9bb0c4a7..ce76717942d41 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -329,7 +329,7 @@ public void asyncOpen(String name, OpenLedgerCallback callback, Object ctx) { @Override public void asyncOpen(final String name, final ManagedLedgerConfig config, final OpenLedgerCallback callback, - Supplier mlOwnershipChecker, final Object ctx) { + Supplier> mlOwnershipChecker, final Object ctx) { if (closed) { callback.openLedgerFailed(new ManagedLedgerException.ManagedLedgerFactoryClosedException(), ctx); return; @@ -504,9 +504,19 @@ public void openReadOnlyManagedLedgerFailed(ManagedLedgerException exception, Ob } void close(ManagedLedger ledger) { - // Remove the ledger from the internal factory cache - ledgers.remove(ledger.getName()); - entryCacheManager.removeEntryCache(ledger.getName()); + // If the future in map is not done or has exceptionally complete, it means that @param-ledger is not in the + // map. + CompletableFuture ledgerFuture = ledgers.get(ledger.getName()); + if (ledgerFuture == null || !ledgerFuture.isDone() || ledgerFuture.isCompletedExceptionally()){ + return; + } + if (ledgerFuture.join() != ledger){ + return; + } + // Remove the ledger from the internal factory cache. + if (ledgers.remove(ledger.getName(), ledgerFuture)) { + entryCacheManager.removeEntryCache(ledger.getName()); + } } public CompletableFuture shutdownAsync() throws ManagedLedgerException { @@ -869,7 +879,10 @@ public void asyncDelete(String name, CompletableFuture mlCo // If it's open, delete in the normal way ml.asyncDelete(callback, ctx); }).exceptionally(ex -> { - // If it's failing to get open, just delete from metadata + // If it fails to get open, it will be cleaned by managed ledger opening error handling. + // then retry will go to `future=null` branch. 
+ final Throwable rc = FutureUtil.unwrapCompletionException(ex); + callback.deleteLedgerFailed(getManagedLedgerException(rc), ctx); return null; }); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java index cf3d7142d617e..5a6bc8017b7e0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.ManagedLedgerFactoryMXBean; @@ -41,6 +42,7 @@ public ManagedLedgerFactoryMBeanImpl(ManagedLedgerFactoryImpl factory) throws Ex } public void refreshStats(long period, TimeUnit unit) { + checkArgument(period >= 0); double seconds = unit.toMillis(period) / 1000.0; if (seconds <= 0.0) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index c74db884a95a9..6727fc63479b4 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -59,7 +59,6 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReference; @@ -138,6 +137,7 @@ import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.LazyLoadableValue; import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.metadata.api.Stat; import org.slf4j.Logger; @@ -232,7 +232,7 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback { private static final Random random = new Random(System.currentTimeMillis()); private long maximumRolloverTimeMs; - protected final Supplier mlOwnershipChecker; + protected final Supplier> mlOwnershipChecker; volatile PositionImpl lastConfirmedEntry; @@ -336,7 +336,7 @@ public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper } public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store, ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, - final String name, final Supplier mlOwnershipChecker) { + final String name, final Supplier> mlOwnershipChecker) { this.factory = factory; this.bookKeeper = bookKeeper; this.config = config; @@ -528,7 +528,8 @@ public void operationFailed(MetaStoreException e) { return; } - log.info("[{}] Created ledger {}", name, lh.getId()); + log.info("[{}] Created ledger {} after closed {}", name, lh.getId(), + currentLedger == null ? 
"null" : currentLedger.getId()); STATE_UPDATER.set(this, State.LedgerOpened); updateLastLedgerCreatedTimeAndScheduleRolloverTask(); currentLedger = lh; @@ -1239,6 +1240,10 @@ public CompletableFuture getEarliestMessagePublishTimeOfPos(PositionImpl p } PositionImpl nextPos = getNextValidPosition(pos); + if (nextPos.compareTo(lastConfirmedEntry) > 0) { + return CompletableFuture.completedFuture(-1L); + } + asyncReadEntry(nextPos, new ReadEntryCallback() { @Override public void readEntryComplete(Entry entry, Object ctx) { @@ -1258,6 +1263,12 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { log.error("Error read entry for position {}", nextPos, exception); future.completeExceptionally(exception); } + + @Override + public String toString() { + return String.format("ML [%s] get earliest message publish time of pos", + ManagedLedgerImpl.this.name); + } }, null); return future; @@ -1528,6 +1539,15 @@ private void closeAllCursors(CloseCallback callback, final Object ctx) { @Override public synchronized void createComplete(int rc, final LedgerHandle lh, Object ctx) { + if (STATE_UPDATER.get(this) == State.Closed) { + if (lh != null) { + log.warn("[{}] ledger create completed after the managed ledger is closed rc={} ledger={}, so just" + + " close this ledger handle.", name, rc, lh != null ? lh.getId() : -1); + lh.closeAsync(); + } + return; + } + if (log.isDebugEnabled()) { log.debug("[{}] createComplete rc={} ledger={}", name, rc, lh != null ? lh.getId() : -1); } @@ -1741,9 +1761,17 @@ synchronized void ledgerClosed(final LedgerHandle lh) { } } + @Override + public void skipNonRecoverableLedger(long ledgerId){ + for (ManagedCursor managedCursor : cursors) { + managedCursor.skipNonRecoverableLedger(ledgerId); + } + } + synchronized void createLedgerAfterClosed() { if (isNeededCreateNewLedgerAfterCloseLedger()) { - log.info("[{}] Creating a new ledger after closed", name); + log.info("[{}] Creating a new ledger after closed {}", name, + currentLedger == null ? "null" : currentLedger.getId()); STATE_UPDATER.set(this, State.CreatingLedger); this.lastLedgerCreationInitiationTimestamp = System.currentTimeMillis(); mbean.startDataLedgerCreateOp(); @@ -1775,15 +1803,19 @@ public void closeComplete(int rc, LedgerHandle lh, Object o) { + "acked ledgerId %s", currentLedger.getId(), lh.getId()); if (rc == BKException.Code.OK) { - log.debug("Successfully closed ledger {}", lh.getId()); + if (log.isDebugEnabled()) { + log.debug("[{}] Successfully closed ledger {}, trigger by rollover full ledger", + name, lh.getId()); + } } else { - log.warn("Error when closing ledger {}. 
Status={}", lh.getId(), BKException.getMessage(rc)); + log.warn("[{}] Error when closing ledger {}, trigger by rollover full ledger, Status={}", + name, lh.getId(), BKException.getMessage(rc)); } ledgerClosed(lh); createLedgerAfterClosed(); } - }, System.nanoTime()); + }, null); } } @@ -2539,15 +2571,13 @@ private void maybeOffload(long offloadThresholdInBytes, long offloadThresholdInS } } - private boolean hasLedgerRetentionExpired(long ledgerTimestamp) { - return config.getRetentionTimeMillis() >= 0 - && clock.millis() - ledgerTimestamp > config.getRetentionTimeMillis(); + private boolean hasLedgerRetentionExpired(long retentionTimeMs, long ledgerTimestamp) { + return retentionTimeMs >= 0 && clock.millis() - ledgerTimestamp > retentionTimeMs; } - private boolean isLedgerRetentionOverSizeQuota(long sizeToDelete) { + private boolean isLedgerRetentionOverSizeQuota(long retentionSizeInMB, long totalSizeOfML, long sizeToDelete) { // Handle the -1 size limit as "infinite" size quota - return config.getRetentionSizeInMB() >= 0 - && TOTAL_SIZE_UPDATER.get(this) - sizeToDelete >= config.getRetentionSizeInMB() * MegaByte; + return retentionSizeInMB >= 0 && totalSizeOfML - sizeToDelete >= retentionSizeInMB * MegaByte; } boolean isOffloadedNeedsDelete(OffloadContext offload, Optional offloadPolicies) { @@ -2606,6 +2636,11 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { } long slowestReaderLedgerId = -1; + final LazyLoadableValue slowestNonDurationLedgerId = + new LazyLoadableValue(() -> getTheSlowestNonDurationReadPosition().getLedgerId()); + final long retentionSizeInMB = config.getRetentionSizeInMB(); + final long retentionTimeMs = config.getRetentionTimeMillis(); + final long totalSizeOfML = TOTAL_SIZE_UPDATER.get(this); if (!cursors.hasDurableCursors()) { // At this point the lastLedger will be pointing to the // ledger that has just been closed, therefore the +1 to @@ -2628,7 +2663,10 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { long totalSizeToDelete = 0; // skip ledger if retention constraint met - for (LedgerInfo ls : ledgers.headMap(slowestReaderLedgerId, false).values()) { + Iterator ledgerInfoIterator = + ledgers.headMap(slowestReaderLedgerId, false).values().iterator(); + while (ledgerInfoIterator.hasNext()){ + LedgerInfo ls = ledgerInfoIterator.next(); // currentLedger can not be deleted if (ls.getLedgerId() == currentLedger.getId()) { if (log.isDebugEnabled()) { @@ -2648,8 +2686,9 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { } totalSizeToDelete += ls.getSize(); - boolean overRetentionQuota = isLedgerRetentionOverSizeQuota(totalSizeToDelete); - boolean expired = hasLedgerRetentionExpired(ls.getTimestamp()); + boolean overRetentionQuota = isLedgerRetentionOverSizeQuota(retentionSizeInMB, totalSizeOfML, + totalSizeToDelete); + boolean expired = hasLedgerRetentionExpired(retentionTimeMs, ls.getTimestamp()); if (log.isDebugEnabled()) { log.debug( "[{}] Checking ledger {} -- time-old: {} sec -- " @@ -2666,14 +2705,19 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { } ledgersToDelete.add(ls); } else { - if (ls.getLedgerId() < getTheSlowestNonDurationReadPosition().getLedgerId()) { - // once retention constraint has been met, skip check - if (log.isDebugEnabled()) { - log.debug("[{}] Ledger {} not deleted. 
Neither expired nor over-quota", name, - ls.getLedgerId()); - } - invalidateReadHandle(ls.getLedgerId()); + // once retention constraint has been met, skip check + if (log.isDebugEnabled()) { + log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, ls.getLedgerId()); } + releaseReadHandleIfNoLongerRead(ls.getLedgerId(), slowestNonDurationLedgerId.getValue()); + break; + } + } + + while (ledgerInfoIterator.hasNext()) { + LedgerInfo ls = ledgerInfoIterator.next(); + if (!releaseReadHandleIfNoLongerRead(ls.getLedgerId(), slowestNonDurationLedgerId.getValue())) { + break; + } } @@ -2759,6 +2803,21 @@ public void operationFailed(MetaStoreException e) { } } + /** + * @param ledgerId the ledger whose cached read handle may be released. + * @return whether the read handle was released. + */ + private boolean releaseReadHandleIfNoLongerRead(long ledgerId, long slowestNonDurationLedgerId) { + if (ledgerId < slowestNonDurationLedgerId) { + if (log.isDebugEnabled()) { + log.debug("[{}] Ledger {} no longer needs to be read, close the cached readHandle", name, ledgerId); + } + invalidateReadHandle(ledgerId); + return true; + } + return false; + } + protected void doDeleteLedgers(List ledgersToDelete) { PositionImpl currentLastConfirmedEntry = lastConfirmedEntry; // Update metadata @@ -2790,15 +2849,14 @@ void advanceCursorsIfNecessary(List ledgersToDelete) throws LedgerNo return; } - // need to move mark delete for non-durable cursors to the first ledger NOT marked for deletion - // calling getNumberOfEntries latter for a ledger that is already deleted will be problematic and return - // incorrect results - Long firstNonDeletedLedger = ledgers.higherKey(ledgersToDelete.get(ledgersToDelete.size() - 1).getLedgerId()); - if (firstNonDeletedLedger == null) { - throw new LedgerNotExistException("First non deleted Ledger is not found"); + // Just ack messages like a consumer. Normally, consumers will not confirm a position that does not exist, so + // find the latest existing position to ack. + PositionImpl highestPositionToDelete = calculateLastEntryInLedgerList(ledgersToDelete); + if (highestPositionToDelete == null) { + log.warn("[{}] The ledgers to be trimmed are all empty, skip advancing non-durable cursors: {}", + name, ledgersToDelete); + return; } - PositionImpl highestPositionToDelete = new PositionImpl(firstNonDeletedLedger, -1); - cursors.forEach(cursor -> { // move the mark delete position to the highestPositionToDelete only if it is smaller than the add confirmed // to prevent the edge case where the cursor is caught up to the latest and highestPositionToDelete may be @@ -2822,6 +2880,19 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { }); } + /** + * @return null if all ledgers are empty. + */ + private PositionImpl calculateLastEntryInLedgerList(List ledgersToDelete) { + for (int i = ledgersToDelete.size() - 1; i >= 0; i--) { + LedgerInfo ledgerInfo = ledgersToDelete.get(i); + if (ledgerInfo != null && ledgerInfo.hasEntries() && ledgerInfo.getEntries() > 0) { + return PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1); + } + } + return null; + } + /** * Delete this ManagedLedger completely from the system.
* @@ -3139,7 +3210,7 @@ public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ct } } - private void offloadLoop(CompletableFuture promise, Queue ledgersToOffload, + void offloadLoop(CompletableFuture promise, Queue ledgersToOffload, PositionImpl firstUnoffloaded, Optional firstError) { State currentState = getState(); if (currentState == State.Closed) { @@ -3172,7 +3243,7 @@ private void offloadLoop(CompletableFuture promise, Queue config.getLedgerOffloader().offload(readHandle, uuid, extraMetadata)) .thenCompose((ignore) -> { return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), - TimeUnit.SECONDS.toHours(1)).limit(10), + TimeUnit.HOURS.toMillis(1)).limit(10), FAIL_ON_CONFLICT, () -> completeLedgerInfoForOffloaded(ledgerId, uuid), scheduledExecutor, name) @@ -3187,6 +3258,7 @@ private void offloadLoop(CompletableFuture promise, Queue= 0) { + return true; + } + if (specifiedLac.getEntryId() < 0) { + // Calculate the meaningful LAC. + PositionImpl actLac = getPreviousPosition(specifiedLac); + if (actLac.getEntryId() >= 0) { + return pos.compareTo(actLac) >= 0; + } else { + // If the actual LAC is still not meaningful. + if (actLac.equals(specifiedLac)) { + // No entries in managed ledger. + return true; + } else { + // Continue to find a valid LAC. + return isNoMessagesAfterPosForSpecifiedLac(actLac, pos); + } + } + } + return false; + } + /** * Get the entry position that come before the specified position in the message stream, using information from the * ledger list and each ledger entries count. @@ -3542,23 +3642,30 @@ public PositionImpl getPreviousPosition(PositionImpl position) { * @return true if the position is valid, false otherwise */ public boolean isValidPosition(PositionImpl position) { - PositionImpl last = lastConfirmedEntry; + PositionImpl lac = lastConfirmedEntry; if (log.isDebugEnabled()) { - log.debug("IsValid position: {} -- last: {}", position, last); + log.debug("IsValid position: {} -- last: {}", position, lac); } - if (position.getEntryId() < 0) { + if (!ledgers.containsKey(position.getLedgerId())){ return false; - } else if (position.getLedgerId() > last.getLedgerId()) { + } else if (position.getEntryId() < 0) { return false; - } else if (position.getLedgerId() == last.getLedgerId()) { - return position.getEntryId() <= (last.getEntryId() + 1); + } else if (currentLedger != null && position.getLedgerId() == currentLedger.getId()) { + // If current ledger is empty, the largest read position can be "{current_ledger: 0}". + // Else, the read position can be set to "{LAC + 1}" when subscribing at LATEST. + return (position.getLedgerId() == lac.getLedgerId() && position.getEntryId() <= lac.getEntryId() + 1) + || position.getEntryId() == 0; + } else if (position.getLedgerId() == lac.getLedgerId()) { + // The ledger which maintains LAC was closed, and there is an empty current ledger. + // If entry id is larger than LAC, it should be "{current_ledger: 0}".
+ return position.getEntryId() <= lac.getEntryId(); } else { // Look in the ledgers map LedgerInfo ls = ledgers.get(position.getLedgerId()); if (ls == null) { - if (position.getLedgerId() < last.getLedgerId()) { + if (position.getLedgerId() < lac.getLedgerId()) { // Pointing to a non-existing ledger that is older than the current ledger is invalid return false; } else { @@ -3933,7 +4040,7 @@ public static ManagedLedgerException createManagedLedgerException(Throwable t) { */ protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType, CreateCallback cb, Map metadata) { - AtomicBoolean ledgerCreated = new AtomicBoolean(false); + CompletableFuture ledgerFutureHook = new CompletableFuture<>(); Map finalMetadata = new HashMap<>(); finalMetadata.putAll(ledgerMetadata); finalMetadata.putAll(metadata); @@ -3946,33 +4053,39 @@ protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig conf )); } catch (EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException e) { log.error("[{}] Serialize the placement configuration failed", name, e); - cb.createComplete(Code.UnexpectedConditionException, null, ledgerCreated); + cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return; } } createdLedgerCustomMetadata = finalMetadata; - try { bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), - config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerCreated, finalMetadata); + config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerFutureHook, finalMetadata); } catch (Throwable cause) { log.error("[{}] Encountered unexpected error when creating ledger", name, cause); - cb.createComplete(Code.UnexpectedConditionException, null, ledgerCreated); + ledgerFutureHook.completeExceptionally(cause); + cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return; } - scheduledExecutor.schedule(() -> { - if (!ledgerCreated.get()) { + + ScheduledFuture timeoutChecker = scheduledExecutor.schedule(() -> { + if (!ledgerFutureHook.isDone() + && ledgerFutureHook.completeExceptionally(new TimeoutException(name + " Create ledger timeout"))) { if (log.isDebugEnabled()) { log.debug("[{}] Timeout creating ledger", name); } - cb.createComplete(BKException.Code.TimeoutException, null, ledgerCreated); + cb.createComplete(BKException.Code.TimeoutException, null, ledgerFutureHook); } else { if (log.isDebugEnabled()) { log.debug("[{}] Ledger already created when timeout task is triggered", name); } } }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS); + + ledgerFutureHook.whenComplete((ignore, ex) -> { + timeoutChecker.cancel(false); + }); } public Clock getClock() { @@ -3981,16 +4094,12 @@ public Clock getClock() { /** * check if ledger-op task is already completed by timeout-task. If completed then delete the created ledger - * - * @param rc - * @param lh - * @param ctx * @return */ protected boolean checkAndCompleteLedgerOpTask(int rc, LedgerHandle lh, Object ctx) { - if (ctx instanceof AtomicBoolean) { + if (ctx instanceof CompletableFuture) { // ledger-creation is already timed out and callback is already completed so, delete this ledger and return. 
- if (((AtomicBoolean) (ctx)).compareAndSet(false, true)) { + if (((CompletableFuture) ctx).complete(lh)) { return false; } else { if (rc == BKException.Code.OK) { @@ -4349,12 +4458,35 @@ private void cancelScheduledTasks() { } @Override - public void checkInactiveLedgerAndRollOver() { + public boolean checkInactiveLedgerAndRollOver() { long currentTimeMs = System.currentTimeMillis(); - if (inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + inactiveLedgerRollOverTimeMs)) { + if (currentLedgerEntries > 0 && inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + + inactiveLedgerRollOverTimeMs)) { log.info("[{}] Closing inactive ledger, last-add entry {}", name, lastAddEntryTimeMs); - ledgerClosed(currentLedger); + if (STATE_UPDATER.compareAndSet(this, State.LedgerOpened, State.ClosingLedger)) { + LedgerHandle currentLedger = this.currentLedger; + currentLedger.asyncClose((rc, lh, o) -> { + checkArgument(currentLedger.getId() == lh.getId(), "ledgerId %s doesn't match with " + + "acked ledgerId %s", currentLedger.getId(), lh.getId()); + + if (rc == BKException.Code.OK) { + if (log.isDebugEnabled()) { + log.debug("[{}] Successfully closed ledger {}, trigger by inactive ledger check", + name, lh.getId()); + } + } else { + log.warn("[{}] Error when closing ledger {}, trigger by inactive ledger check, Status={}", + name, lh.getId(), BKException.getMessage(rc)); + } + + ledgerClosed(lh); + createLedgerAfterClosed(); + // we do not create ledger here, since topic is inactive for a long time. + }, null); + return true; + } } + return false; } @@ -4407,4 +4539,4 @@ public Position getTheSlowestNonDurationReadPosition() { } return theSlowestNonDurableReadPosition; } -} +} \ No newline at end of file diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java index cb3d72cc5972f..7884add95526c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -63,6 +64,7 @@ public ManagedLedgerMBeanImpl(ManagedLedgerImpl managedLedger) { } public void refreshStats(long period, TimeUnit unit) { + checkArgument(period >= 0); double seconds = unit.toMillis(period) / 1000.0; if (seconds <= 0.0) { // skip refreshing stats @@ -100,8 +102,8 @@ public void recordReadEntriesError() { readEntriesOpsFailed.recordEvent(); } - public void recordReadEntriesOpsCacheMisses() { - readEntriesOpsCacheMisses.recordEvent(); + public void recordReadEntriesOpsCacheMisses(int count, long totalSize) { + readEntriesOpsCacheMisses.recordMultipleEvents(count, totalSize); } public void addAddEntryLatencySample(long latency, TimeUnit unit) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java index fcce1f7766cc8..1bc2d2b04bed0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java @@ -23,7 +23,6 @@ 
import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -182,7 +181,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat) @Override public void operationFailed(MetaStoreException e) { if (e instanceof MetadataNotFoundException) { - result.complete(Collections.emptyMap()); + result.complete(new HashMap<>()); } else { result.completeExceptionally(e); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java index 9d2829b1707f4..77216ce2e4588 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java @@ -70,7 +70,7 @@ public class NonDurableCursorImpl extends ManagedCursorImpl { private void recoverCursor(PositionImpl mdPosition) { Pair lastEntryAndCounter = ledger.getLastPositionAndCounter(); this.readPosition = isReadCompacted() ? mdPosition.getNext() : ledger.getNextValidPosition(mdPosition); - markDeletePosition = mdPosition; + markDeletePosition = ledger.getPreviousPosition(this.readPosition); // Initialize the counter such that the difference between the messages written on the ML and the // messagesConsumed is equal to the current backlog (negated). @@ -138,8 +138,12 @@ public void rewind() { @Override public synchronized String toString() { - return MoreObjects.toStringHelper(this).add("ledger", ledger.getName()).add("ackPos", markDeletePosition) - .add("readPos", readPosition).toString(); + return MoreObjects.toStringHelper(this) + .add("ledger", ledger.getName()) + .add("cursor", getName()) + .add("ackPos", markDeletePosition) + .add("readPos", readPosition) + .toString(); } private static final Logger log = LoggerFactory.getLogger(NonDurableCursorImpl.class); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java index 19211553a5f74..7b59c3903d5bc 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java @@ -116,9 +116,11 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { readPosition, exception.getMessage()); final ManagedLedgerImpl ledger = (ManagedLedgerImpl) cursor.getManagedLedger(); Position nexReadPosition; + Long lostLedger = null; if (exception instanceof ManagedLedgerException.LedgerNotExistException) { // try to find and move to next valid ledger nexReadPosition = cursor.getNextLedgerPosition(readPosition.getLedgerId()); + lostLedger = readPosition.ledgerId; } else { // Skip this read operation nexReadPosition = ledger.getValidPositionAfterSkippedEntries(readPosition, count); @@ -131,6 +133,9 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { return; } updateReadPosition(nexReadPosition); + if (lostLedger != null) { + cursor.getManagedLedger().skipNonRecoverableLedger(lostLedger); + } checkReadCompletion(); } else { if (!(exception instanceof TooManyRequestsException)) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java index 9102339b2904e..1661613f07d7d 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java @@ -23,6 +23,7 @@ import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.ReadOnlyCursor; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.PositionBound; import org.apache.bookkeeper.mledger.proto.MLDataFormats; @@ -70,4 +71,9 @@ public MLDataFormats.ManagedLedgerInfo.LedgerInfo getCurrentLedgerInfo() { public long getNumberOfEntries(Range range) { return this.ledger.getNumberOfEntries(range); } + + @Override + public boolean isMessageDeleted(Position position) { + return false; + } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyManagedLedgerImpl.java index 944674f6862c2..1fdf69395068f 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyManagedLedgerImpl.java @@ -143,8 +143,8 @@ public void asyncReadEntry(PositionImpl position, AsyncCallbacks.ReadEntryCallba this.getLedgerHandle(position.getLedgerId()) .thenAccept((ledger) -> asyncReadEntry(ledger, position, callback, ctx)) .exceptionally((ex) -> { - log.error("[{}] Error opening ledger for reading at position {} - {}", this.name, position, - ex.getMessage()); + log.error("[{}] Error opening ledger for reading at position {} - {}. 
Op: {}", this.name, + position, ex.getMessage(), callback); callback.readEntryFailed(ManagedLedgerException.getManagedLedgerException(ex.getCause()), ctx); return null; }); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java index b33dd87543f77..8b2742d958783 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.AsyncCallback; @@ -50,7 +51,7 @@ public class ShadowManagedLedgerImpl extends ManagedLedgerImpl { public ShadowManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store, ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, - String name, final Supplier mlOwnershipChecker) { + String name, final Supplier> mlOwnershipChecker) { super(factory, bookKeeper, store, config, scheduledExecutor, name, mlOwnershipChecker); this.sourceMLName = config.getShadowSourceName(); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java index d2add99b701ac..d1050e0062826 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java @@ -93,7 +93,7 @@ public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boole } finally { ledgerEntries.close(); } - ml.getMbean().recordReadEntriesOpsCacheMisses(); + ml.getMbean().recordReadEntriesOpsCacheMisses(entries.size(), totalSize); ml.getFactory().getMbean().recordCacheMiss(entries.size(), totalSize); ml.getMbean().addReadEntriesSample(entries.size(), totalSize); @@ -121,7 +121,7 @@ public void asyncReadEntry(ReadHandle lh, PositionImpl position, AsyncCallbacks. 
LedgerEntry ledgerEntry = iterator.next(); EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor); - ml.getMbean().recordReadEntriesOpsCacheMisses(); + ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength()); ml.getFactory().getMbean().recordCacheMiss(1, returnEntry.getLength()); ml.getMbean().addReadEntriesSample(1, returnEntry.getLength()); callback.readEntryComplete(returnEntry, ctx); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java index 7747f9bcd93b6..27aec6f178e39 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java @@ -256,7 +256,7 @@ private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEnt LedgerEntry ledgerEntry = iterator.next(); EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor); - ml.getMbean().recordReadEntriesOpsCacheMisses(); + ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength()); manager.mlFactoryMBean.recordCacheMiss(1, returnEntry.getLength()); ml.getMbean().addReadEntriesSample(1, returnEntry.getLength()); callback.readEntryComplete(returnEntry, ctx); @@ -450,7 +450,7 @@ CompletableFuture> readFromStorage(ReadHandle lh, } } - ml.getMbean().recordReadEntriesOpsCacheMisses(); + ml.getMbean().recordReadEntriesOpsCacheMisses(entriesToReturn.size(), totalSize); manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize); ml.getMbean().addReadEntriesSample(entriesToReturn.size(), totalSize); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java index 550626f76c000..9c9feb2aa7f7c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java @@ -198,7 +198,7 @@ public static CompletableFuture cleanupOffloaded(long ledgerId, UUID uuid, metadataMap.put("ManagedLedgerName", name); return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), - TimeUnit.SECONDS.toHours(1)).limit(10), + TimeUnit.HOURS.toMillis(1)).limit(10), Retries.NonFatalPredicate, () -> mlConfig.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), executor, name).whenComplete((ignored, exception) -> { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java index 7599e2cc1874f..d34857e5e5177 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java @@ -27,7 +27,6 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.tuple.Pair; /** @@ -74,13 +73,18 @@ public RangeCache(Weighter weighter, TimestampExtractor timestampE * @return whether the entry was inserted in the cache */ public boolean put(Key key, Value value) { - MutableBoolean flag = new MutableBoolean(); - 
entries.computeIfAbsent(key, (k) -> { - size.addAndGet(weighter.getSize(value)); - flag.setValue(true); - return value; - }); - return flag.booleanValue(); + // retain value so that it's not released before we put it in the cache and calculate the weight + value.retain(); + try { + if (entries.putIfAbsent(key, value) == null) { + size.addAndGet(weighter.getSize(value)); + return true; + } else { + return false; + } + } finally { + value.release(); + } } public boolean exists(Key key) { @@ -242,7 +246,6 @@ public synchronized Pair clear() { value.release(); } - entries.clear(); size.getAndAdd(-removedSize); return Pair.of(removedCount, removedSize); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java index 2c01b778caf6b..04d99d3bdf480 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java @@ -258,6 +258,11 @@ public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate AsyncCallbacks.FindEntryCallback callback, Object ctx) { } + @Override + public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, + AsyncCallbacks.FindEntryCallback callback, Object ctx, boolean isFindFromLedger) { + } + @Override public void asyncResetCursor(final Position position, boolean forceReset, AsyncCallbacks.ResetCursorCallback callback) { diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java index 1b1b5534256f9..644f53c3a522d 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java @@ -43,6 +43,7 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -65,6 +66,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; import lombok.Cleanup; @@ -72,6 +74,7 @@ import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.BookKeeper.DigestType; import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCallback; @@ -229,6 +232,97 @@ void readTwice() throws Exception { entries.forEach(Entry::release); } + @Test + void testPersistentMarkDeleteIfCreateCursorLedgerFailed() throws Exception { + final int entryCount = 10; + final String cursorName = "c1"; + final String mlName = "ml_test"; + final ManagedLedgerConfig mlConfig = new ManagedLedgerConfig().setMaxEntriesPerLedger(1); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + + ManagedCursor cursor = ml.openCursor("c1"); + Position lastEntry = null; + for (int i = 0; i < 10; i++) { + lastEntry = ml.addEntry(("entry-" + i).getBytes(Encoding)); + } 
+ + // Mock cursor ledger create failed. + bkc.failNow(BKException.Code.NoBookieAvailableException); + + cursor.markDelete(lastEntry); + + // Assert persist mark deleted position to ZK was successful. + PositionImpl slowestReadPosition = ml.getCursors().getSlowestReaderPosition(); + assertTrue(slowestReadPosition.getLedgerId() >= lastEntry.getLedgerId()); + assertTrue(slowestReadPosition.getEntryId() >= lastEntry.getEntryId()); + assertEquals(cursor.getStats().getPersistLedgerSucceed(), 0); + assertTrue(cursor.getStats().getPersistZookeeperSucceed() > 0); + assertEquals(cursor.getPersistentMarkDeletedPosition(), lastEntry); + + // Verify the mark delete position can be recovered properly. + ml.close(); + ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + ManagedCursorImpl cursorRecovered = (ManagedCursorImpl) ml.openCursor(cursorName); + assertEquals(cursorRecovered.getPersistentMarkDeletedPosition(), lastEntry); + + // cleanup. + ml.delete(); + } + + @Test + void testPersistentMarkDeleteIfSwitchCursorLedgerFailed() throws Exception { + final int entryCount = 10; + final String cursorName = "c1"; + final String mlName = "ml_test"; + final ManagedLedgerConfig mlConfig = new ManagedLedgerConfig().setMaxEntriesPerLedger(1); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + + final ManagedCursorImpl cursor = (ManagedCursorImpl) ml.openCursor(cursorName); + ArrayList positions = new ArrayList<>(); + for (int i = 0; i < entryCount; i++) { + positions.add(ml.addEntry(("entry-" + i).getBytes(Encoding))); + } + // Trigger the cursor ledger creating. + cursor.markDelete(positions.get(0)); + assertTrue(cursor.getStats().getPersistLedgerSucceed() > 0); + + // Mock cursor ledger write failed. + bkc.addEntryFailAfter(0, BKException.Code.NoBookieAvailableException); + // Trigger a failed writing of the cursor ledger, then wait the stat of cursor to be "NoLedger". + // This time ZK will be written due to a failure to write BK. + cursor.markDelete(positions.get(1)); + Awaitility.await().untilAsserted(() -> { + assertEquals(cursor.getState(), "NoLedger"); + }); + assertTrue(cursor.getStats().getPersistLedgerErrors() > 0); + long persistZookeeperSucceed1 = cursor.getStats().getPersistZookeeperSucceed(); + assertTrue(persistZookeeperSucceed1 > 0); + + // Mock cursor ledger create failed. + bkc.failNow(BKException.Code.NoBookieAvailableException); + // Verify the cursor status will be persistent to ZK even if the cursor ledger creation always fails. + // This time ZK will be written due to catch up. + Position lastEntry = positions.get(entryCount -1); + cursor.markDelete(lastEntry); + long persistZookeeperSucceed2 = cursor.getStats().getPersistZookeeperSucceed(); + assertTrue(persistZookeeperSucceed2 > persistZookeeperSucceed1); + + // Assert persist mark deleted position to ZK was successful. + PositionImpl slowestReadPosition = ml.getCursors().getSlowestReaderPosition(); + assertTrue(slowestReadPosition.getLedgerId() >= lastEntry.getLedgerId()); + assertTrue(slowestReadPosition.getEntryId() >= lastEntry.getEntryId()); + assertEquals(cursor.getPersistentMarkDeletedPosition(), lastEntry); + + // Verify the mark delete position can be recovered properly. + ml.close(); + ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + ManagedCursorImpl cursorRecovered = (ManagedCursorImpl) ml.openCursor(cursorName); + assertEquals(cursorRecovered.getPersistentMarkDeletedPosition(), lastEntry); + + // cleanup. 
+ ml.delete(); + } + @Test(timeOut = 20000) void readWithCacheDisabled() throws Exception { ManagedLedgerFactoryConfig config = new ManagedLedgerFactoryConfig(); @@ -675,7 +769,7 @@ void testResetCursor() throws Exception { @Test(timeOut = 20000) void testResetCursor1() throws Exception { ManagedLedger ledger = factory.open("my_test_move_cursor_ledger", - new ManagedLedgerConfig().setMaxEntriesPerLedger(2)); + new ManagedLedgerConfig().setMaxEntriesPerLedger(2)); ManagedCursor cursor = ledger.openCursor("trc1"); PositionImpl actualEarliest = (PositionImpl) ledger.addEntry("dummy-entry-1".getBytes(Encoding)); ledger.addEntry("dummy-entry-2".getBytes(Encoding)); @@ -1421,6 +1515,17 @@ void errorRecoveringCursor2() throws Exception { ledger = factory2.open("my_test_ledger"); ManagedCursor cursor = ledger.openCursor("c1"); Position position = ledger.addEntry("test".getBytes()); + // Make persist zk fail once. + AtomicInteger persistZKTimes = new AtomicInteger(); + metadataStore.failConditional(new MetadataStoreException.BadVersionException("mock ex"), (type, path) -> { + if (FaultInjectionMetadataStore.OperationType.PUT.equals(type) + && path.equals("/managed-ledgers/my_test_ledger/c1")) { + if (persistZKTimes.incrementAndGet() == 1) { + return true; + } + } + return false; + }); try { cursor.markDelete(position); fail("should have failed"); @@ -2184,7 +2289,7 @@ void testFindNewestMatchingEdgeCase1() throws Exception { ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1"); assertNull(c1.findNewestMatching( - entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding)))); + entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding)))); } @Test(timeOut = 20000) @@ -2493,7 +2598,7 @@ public void findEntryComplete(Position position, Object ctx) { @Override public void findEntryFailed(ManagedLedgerException exception, Optional failedReadPosition, - Object ctx) { + Object ctx) { result.exception = exception; counter.countDown(); } @@ -2519,7 +2624,7 @@ public void findEntryFailed(ManagedLedgerException exception, Optional } void internalTestFindNewestMatchingAllEntries(final String name, final int entriesPerLedger, - final int expectedEntryId) throws Exception { + final int expectedEntryId) throws Exception { final String ledgerAndCursorName = name; ManagedLedgerConfig config = new ManagedLedgerConfig(); config.setRetentionSizeInMB(10); @@ -2613,7 +2718,7 @@ void testReplayEntries() throws Exception { assertTrue((Arrays.equals(entries.get(0).getData(), "entry1".getBytes(Encoding)) && Arrays.equals(entries.get(1).getData(), "entry3".getBytes(Encoding))) || (Arrays.equals(entries.get(0).getData(), "entry3".getBytes(Encoding)) - && Arrays.equals(entries.get(1).getData(), "entry1".getBytes(Encoding)))); + && Arrays.equals(entries.get(1).getData(), "entry1".getBytes(Encoding)))); entries.forEach(Entry::release); // 3. 
Fail on reading non-existing position @@ -3040,7 +3145,7 @@ public void operationFailed(ManagedLedgerException exception) { try { bkc.openLedgerNoRecovery(ledgerId, DigestType.fromApiDigestType(mlConfig.getDigestType()), - mlConfig.getPassword()); + mlConfig.getPassword()); fail("ledger should have deleted due to update-cursor failure"); } catch (BKException e) { // ok @@ -3659,17 +3764,17 @@ private void deleteBatchIndex(ManagedCursor cursor, Position position, int batch pos.ackSet = bitSet.toLongArray(); cursor.asyncDelete(pos, - new DeleteCallback() { - @Override - public void deleteComplete(Object ctx) { - latch.countDown(); - } + new DeleteCallback() { + @Override + public void deleteComplete(Object ctx) { + latch.countDown(); + } - @Override - public void deleteFailed(ManagedLedgerException exception, Object ctx) { - latch.countDown(); - } - }, null); + @Override + public void deleteFailed(ManagedLedgerException exception, Object ctx) { + latch.countDown(); + } + }, null); latch.await(); pos.ackSet = null; } @@ -4382,5 +4487,202 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { ledger.close(); } + + @Test + public void testReadEntriesWithSkipDeletedEntries() throws Exception { + @Cleanup + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testReadEntriesWithSkipDeletedEntries"); + ledger = Mockito.spy(ledger); + List actualReadEntryIds = new ArrayList<>(); + Mockito.doAnswer(inv -> { + long start = inv.getArgument(1); + long end = inv.getArgument(2); + for (long i = start; i <= end; i++) { + actualReadEntryIds.add(i); + } + return inv.callRealMethod(); + }) + .when(ledger) + .asyncReadEntry(Mockito.any(ReadHandle.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.any()); + @Cleanup + ManagedCursor cursor = ledger.openCursor("c"); + + int entries = 20; + Position maxReadPosition = null; + Map map = new HashMap<>(); + for (int i = 0; i < entries; i++) { + maxReadPosition = ledger.addEntry(new byte[1024]); + map.put(i, maxReadPosition); + } + + + Set deletedPositions = new HashSet<>(); + deletedPositions.add(map.get(1)); + deletedPositions.add(map.get(4)); + deletedPositions.add(map.get(5)); + deletedPositions.add(map.get(8)); + deletedPositions.add(map.get(9)); + deletedPositions.add(map.get(10)); + deletedPositions.add(map.get(15)); + deletedPositions.add(map.get(17)); + deletedPositions.add(map.get(19)); + cursor.delete(deletedPositions); + + CompletableFuture f0 = new CompletableFuture<>(); + List readEntries = new ArrayList<>(); + cursor.asyncReadEntries(5, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f0.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f0.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + f0.get(); + + CompletableFuture f1 = new CompletableFuture<>(); + cursor.asyncReadEntries(5, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f1.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f1.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + + f1.get(); + CompletableFuture f2 = new CompletableFuture<>(); + 
cursor.asyncReadEntries(100, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f2.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f2.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + f2.get(); + + Position cursorReadPosition = cursor.getReadPosition(); + Position expectReadPosition = maxReadPosition.getNext(); + assertTrue(cursorReadPosition.getLedgerId() == expectReadPosition.getLedgerId() + && cursorReadPosition.getEntryId() == expectReadPosition.getEntryId()); + + assertEquals(readEntries.size(), actualReadEntryIds.size()); + assertEquals(entries - deletedPositions.size(), actualReadEntryIds.size()); + for (Entry entry : readEntries) { + long entryId = entry.getEntryId(); + assertTrue(actualReadEntryIds.contains(entryId)); + } + } + + + @Test + public void testReadEntriesWithSkipDeletedEntriesAndWithSkipConditions() throws Exception { + @Cleanup + ManagedLedgerImpl ledger = (ManagedLedgerImpl) + factory.open("testReadEntriesWithSkipDeletedEntriesAndWithSkipConditions"); + ledger = Mockito.spy(ledger); + + List actualReadEntryIds = new ArrayList<>(); + Mockito.doAnswer(inv -> { + long start = inv.getArgument(1); + long end = inv.getArgument(2); + for (long i = start; i <= end; i++) { + actualReadEntryIds.add(i); + } + return inv.callRealMethod(); + }) + .when(ledger) + .asyncReadEntry(Mockito.any(ReadHandle.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.any()); + @Cleanup + ManagedCursor cursor = ledger.openCursor("c"); + + int entries = 20; + Position maxReadPosition0 = null; + Map map = new HashMap<>(); + for (int i = 0; i < entries; i++) { + maxReadPosition0 = ledger.addEntry(new byte[1024]); + map.put(i, maxReadPosition0); + } + + PositionImpl maxReadPosition = + PositionImpl.get(maxReadPosition0.getLedgerId(), maxReadPosition0.getEntryId()).getNext(); + + Set deletedPositions = new HashSet<>(); + deletedPositions.add(map.get(1)); + deletedPositions.add(map.get(3)); + deletedPositions.add(map.get(5)); + cursor.delete(deletedPositions); + + Set skippedPositions = new HashSet<>(); + skippedPositions.add(map.get(6).getEntryId()); + skippedPositions.add(map.get(7).getEntryId()); + skippedPositions.add(map.get(8).getEntryId()); + skippedPositions.add(map.get(11).getEntryId()); + skippedPositions.add(map.get(15).getEntryId()); + skippedPositions.add(map.get(16).getEntryId()); + + Predicate skipCondition = position -> skippedPositions.contains(position.getEntryId()); + List readEntries = new ArrayList<>(); + + CompletableFuture f0 = new CompletableFuture<>(); + cursor.asyncReadEntriesWithSkip(10, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f0.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f0.completeExceptionally(exception); + } + }, null, maxReadPosition, skipCondition); + + f0.get(); + CompletableFuture f1 = new CompletableFuture<>(); + cursor.asyncReadEntriesWithSkip(100, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f1.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + 
f1.completeExceptionally(exception); + } + }, null, maxReadPosition, skipCondition); + f1.get(); + + + assertEquals(actualReadEntryIds.size(), readEntries.size()); + assertEquals(entries - deletedPositions.size() - skippedPositions.size(), actualReadEntryIds.size()); + for (Entry entry : readEntries) { + long entryId = entry.getEntryId(); + assertTrue(actualReadEntryIds.contains(entryId)); + } + + Position cursorReadPosition = cursor.getReadPosition(); + Position expectReadPosition = maxReadPosition; + assertTrue(cursorReadPosition.getLedgerId() == expectReadPosition.getLedgerId() + && cursorReadPosition.getEntryId() == expectReadPosition.getEntryId()); + } + private static final Logger log = LoggerFactory.getLogger(ManagedCursorTest.class); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java index d49d9ab3e2b6b..8695759c99f62 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryTest.java @@ -20,12 +20,16 @@ import static org.testng.Assert.assertEquals; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; import org.apache.bookkeeper.mledger.ManagedLedgerInfo.CursorInfo; import org.apache.bookkeeper.mledger.ManagedLedgerInfo.MessageRangeInfo; import org.apache.bookkeeper.test.MockedBookKeeperTestCase; +import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.Test; public class ManagedLedgerFactoryTest extends MockedBookKeeperTestCase { @@ -71,4 +75,43 @@ public void testGetManagedLedgerInfoWithClose() throws Exception { assertEquals(mri.to.entryId, 0); } + /** + * see: https://github.com/apache/pulsar/pull/18688 + */ + @Test + public void testConcurrentCloseLedgerAndSwitchLedgerForReproduceIssue() throws Exception { + String managedLedgerName = "lg_" + UUID.randomUUID().toString().replaceAll("-", "_"); + + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setThrottleMarkDelete(1); + config.setMaximumRolloverTime(Integer.MAX_VALUE, TimeUnit.SECONDS); + config.setMaxEntriesPerLedger(5); + + // create managedLedger once and close it. + ManagedLedgerImpl managedLedger1 = (ManagedLedgerImpl) factory.open(managedLedgerName, config); + waitManagedLedgerStateEquals(managedLedger1, ManagedLedgerImpl.State.LedgerOpened); + managedLedger1.close(); + + // create managedLedger the second time. + ManagedLedgerImpl managedLedger2 = (ManagedLedgerImpl) factory.open(managedLedgerName, config); + waitManagedLedgerStateEquals(managedLedger2, ManagedLedgerImpl.State.LedgerOpened); + + // Mock the task create ledger complete now, it will change the state to another value which not is Closed. + // Close managedLedger1 the second time. + managedLedger1.createComplete(1, null, null); + managedLedger1.close(); + + // Verify managedLedger2 is still there. + Assert.assertFalse(factory.ledgers.isEmpty()); + Assert.assertEquals(factory.ledgers.get(managedLedger2.getName()).join(), managedLedger2); + + // cleanup. 
+ managedLedger2.close(); + } + + private void waitManagedLedgerStateEquals(ManagedLedgerImpl managedLedger, ManagedLedgerImpl.State expectedStat){ + Awaitility.await().untilAsserted(() -> + Assert.assertTrue(managedLedger.getState() == expectedStat)); + } + } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java index dd30cde72e769..34f65dfd00ee8 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java @@ -21,7 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -55,17 +57,21 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.UUID; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.FutureTask; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -88,7 +94,10 @@ import org.apache.bookkeeper.client.PulsarMockBookKeeper; import org.apache.bookkeeper.client.PulsarMockLedgerHandle; import org.apache.bookkeeper.client.api.LedgerEntries; +import org.apache.bookkeeper.client.api.LedgerMetadata; import org.apache.bookkeeper.client.api.ReadHandle; +import org.apache.bookkeeper.common.util.BoundedScheduledExecutorService; +import org.apache.bookkeeper.common.util.OrderedScheduler; import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; @@ -123,6 +132,7 @@ import org.apache.bookkeeper.mledger.util.Futures; import org.apache.bookkeeper.test.MockedBookKeeperTestCase; import org.apache.commons.lang3.exception.ExceptionUtils; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; @@ -134,6 +144,7 @@ import org.apache.pulsar.metadata.api.extended.SessionEvent; import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.DataProvider; @@ -3084,9 +3095,9 @@ public void testManagedLedgerWithCreateLedgerTimeOut() throws Exception { latch.await(config.getMetadataOperationsTimeoutSeconds() + 2, TimeUnit.SECONDS); assertEquals(response.get(), BKException.Code.TimeoutException); - assertTrue(ctxHolder.get() instanceof AtomicBoolean); 
- AtomicBoolean ledgerCreated = (AtomicBoolean) ctxHolder.get(); - assertFalse(ledgerCreated.get()); + assertTrue(ctxHolder.get() instanceof CompletableFuture); + CompletableFuture ledgerCreateHook = (CompletableFuture) ctxHolder.get(); + assertTrue(ledgerCreateHook.isCompletedExceptionally()); ledger.close(); } @@ -3388,7 +3399,7 @@ public void openCursorFailed(ManagedLedgerException exception, Object ctx) { @Override public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { } - }, checkOwnershipFlag ? () -> true : null, null); + }, checkOwnershipFlag ? () -> CompletableFuture.completedFuture(true) : null, null); latch.await(); } @@ -3859,12 +3870,26 @@ public void testInactiveLedgerRollOver() throws Exception { ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("rollover_inactive", config); ManagedCursor cursor = ledger.openCursor("c1"); + List ledgerIds = new ArrayList<>(); + int totalAddEntries = 5; for (int i = 0; i < totalAddEntries; i++) { String content = "entry"; // 5 bytes ledger.checkInactiveLedgerAndRollOver(); ledger.addEntry(content.getBytes()); Thread.sleep(inactiveLedgerRollOverTimeMs * 5); + + ledgerIds.add(ledger.currentLedger.getId()); + } + + Map ledgerMap = bkc.getLedgerMap(); + // skip check last ledger, it should be open + for (int i = 0; i < ledgerIds.size() - 1; i++) { + long ledgerId = ledgerIds.get(i); + LedgerMetadata ledgerMetadata = ledgerMap.get(ledgerId).getLedgerMetadata(); + if (ledgerMetadata != null) { + assertTrue(ledgerMetadata.isClosed()); + } } List ledgers = ledger.getLedgersInfoAsList(); @@ -3873,6 +3898,30 @@ public void testInactiveLedgerRollOver() throws Exception { factory.shutdown(); } + @Test + public void testDontRollOverEmptyInactiveLedgers() throws Exception { + int inactiveLedgerRollOverTimeMs = 5; + ManagedLedgerFactoryConfig factoryConf = new ManagedLedgerFactoryConfig(); + @Cleanup("shutdown") + ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setInactiveLedgerRollOverTime(inactiveLedgerRollOverTimeMs, TimeUnit.MILLISECONDS); + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("rollover_inactive", config); + ManagedCursor cursor = ledger.openCursor("c1"); + + long ledgerId = ledger.currentLedger.getId(); + + Thread.sleep(inactiveLedgerRollOverTimeMs * 5); + ledger.checkInactiveLedgerAndRollOver(); + + Thread.sleep(inactiveLedgerRollOverTimeMs * 5); + ledger.checkInactiveLedgerAndRollOver(); + + assertEquals(ledger.currentLedger.getId(), ledgerId); + + ledger.close(); + } + @Test public void testOffloadTaskCancelled() throws Exception { ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); @@ -3953,6 +4002,70 @@ public void testGetEnsemblesAsync() throws Exception { Assert.assertFalse(managedLedger.ledgerCache.containsKey(lastLedger)); } + @Test + public void testIsNoMessagesAfterPos() throws Exception { + final byte[] data = new byte[]{1,2,3}; + final String cursorName = "c1"; + final String mlName = UUID.randomUUID().toString().replaceAll("-", ""); + final ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName); + final ManagedCursor managedCursor = ml.openCursor(cursorName); + + // One ledger. 
+ PositionImpl p1 = (PositionImpl) ml.addEntry(data); + PositionImpl p2 = (PositionImpl) ml.addEntry(data); + PositionImpl p3 = (PositionImpl) ml.addEntry(data); + assertFalse(ml.isNoMessagesAfterPos(p1)); + assertFalse(ml.isNoMessagesAfterPos(p2)); + assertTrue(ml.isNoMessagesAfterPos(p3)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p3.getLedgerId(), p3.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p3.getLedgerId() + 1, -1))); + + // More than one ledger. + ml.ledgerClosed(ml.currentLedger); + PositionImpl p4 = (PositionImpl) ml.addEntry(data); + PositionImpl p5 = (PositionImpl) ml.addEntry(data); + PositionImpl p6 = (PositionImpl) ml.addEntry(data); + assertFalse(ml.isNoMessagesAfterPos(p1)); + assertFalse(ml.isNoMessagesAfterPos(p2)); + assertFalse(ml.isNoMessagesAfterPos(p3)); + assertFalse(ml.isNoMessagesAfterPos(p4)); + assertFalse(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // Switch ledger and make the entry id of Last confirmed entry is -1; + ml.ledgerClosed(ml.currentLedger); + ml.createLedgerAfterClosed(); + Awaitility.await().untilAsserted(() -> { + assertEquals(ml.currentLedgerEntries, 0); + }); + ml.lastConfirmedEntry = PositionImpl.get(ml.currentLedger.getId(), -1); + assertFalse(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // Trim ledgers to make there is no entries in ML. + ml.deleteCursor(cursorName); + CompletableFuture future = new CompletableFuture<>(); + ml.trimConsumedLedgersInBackground(true, future); + future.get(); + assertEquals(ml.ledgers.size(), 1); + assertEquals(ml.lastConfirmedEntry.getEntryId(), -1); + assertTrue(ml.isNoMessagesAfterPos(p1)); + assertTrue(ml.isNoMessagesAfterPos(p2)); + assertTrue(ml.isNoMessagesAfterPos(p3)); + assertTrue(ml.isNoMessagesAfterPos(p4)); + assertTrue(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // cleanup. 
+ ml.close(); + } + @Test public void testGetEstimatedBacklogSize() throws Exception { ManagedLedgerConfig config = new ManagedLedgerConfig(); @@ -3995,4 +4108,127 @@ public void operationFailed(MetaStoreException e) { }); future.join(); } + + @Test + public void testNonDurableCursorCreateForInactiveLedger() throws Exception { + String mlName = "testLedgerInfoMetaCorrectIfAddEntryTimeOut"; + BookKeeper spyBookKeeper = spy(bkc); + ManagedLedgerFactoryImpl factory = new ManagedLedgerFactoryImpl(metadataStore, spyBookKeeper); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setInactiveLedgerRollOverTime(10, TimeUnit.MILLISECONDS); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, config); + ml.addEntry("entry".getBytes(UTF_8)); + + MutableBoolean isRolledOver = new MutableBoolean(false); + retryStrategically((test) -> { + if (isRolledOver.booleanValue()) { + return true; + } + isRolledOver.setValue(ml.checkInactiveLedgerAndRollOver()); + return isRolledOver.booleanValue(); + }, 5, 1000); + assertTrue(isRolledOver.booleanValue()); + + Position Position = new PositionImpl(-1L, -1L); + assertNotNull(ml.newNonDurableCursor(Position)); + } + + /*** + * When a ML tries to create a ledger, it will create a delay task to check if the ledger create request is timeout. + * But we should guarantee that the delay task should be canceled after the ledger create request responded. + */ + @Test + public void testNoOrphanScheduledTasksAfterCloseML() throws Exception { + String mlName = UUID.randomUUID().toString(); + ManagedLedgerFactoryImpl factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMetadataOperationsTimeoutSeconds(3600); + + // Calculate pending task count. + long pendingTaskCountBefore = calculatePendingTaskCount(factory.getScheduledExecutor()); + // Trigger create & close ML 1000 times. + for (int i = 0; i < 1000; i++) { + ManagedLedger ml = factory.open(mlName, config); + ml.close(); + } + // Verify there is no orphan scheduled task. + long pendingTaskCountAfter = calculatePendingTaskCount(factory.getScheduledExecutor()); + // Maybe there are other components also appended scheduled tasks, so leave 100 tasks to avoid flaky. 
+ assertTrue(pendingTaskCountAfter - pendingTaskCountBefore < 100); + } + + /** + * Calculate how many pending tasks in {@link OrderedScheduler} + */ + private long calculatePendingTaskCount(OrderedScheduler orderedScheduler) { + ExecutorService[] threads = WhiteboxImpl.getInternalState(orderedScheduler, "threads"); + long taskCounter = 0; + for (ExecutorService thread : threads) { + BoundedScheduledExecutorService boundedScheduledExecutorService = + WhiteboxImpl.getInternalState(thread, "delegate"); + BlockingQueue queue = WhiteboxImpl.getInternalState(boundedScheduledExecutorService, "queue"); + for (Runnable r : queue) { + if (r instanceof FutureTask) { + FutureTask futureTask = (FutureTask) r; + if (!futureTask.isCancelled() && !futureTask.isDone()) { + taskCounter++; + } + } else { + taskCounter++; + } + } + } + return taskCounter; + } + + @Test + public void testNoCleanupOffloadLedgerWhenMetadataExceptionHappens() throws Exception { + ManagedLedgerConfig config = spy(new ManagedLedgerConfig()); + ManagedLedgerImpl ml = spy((ManagedLedgerImpl) factory.open("testNoCleanupOffloadLedger", config)); + + // mock the ledger offloader + LedgerOffloader ledgerOffloader = mock(NullLedgerOffloader.class); + when(config.getLedgerOffloader()).thenReturn(ledgerOffloader); + when(ledgerOffloader.getOffloadDriverName()).thenReturn("mock"); + + // There will have two put call to the metadata store, the first time is prepare the offload. + // And the second is the complete the offload. This case is testing when completing the offload, + // the metadata store meets an exception. + AtomicInteger metadataPutCallCount = new AtomicInteger(0); + metadataStore.failConditional(new MetadataStoreException("mock completion error"), + (key, value) -> key.equals(FaultInjectionMetadataStore.OperationType.PUT) && + metadataPutCallCount.incrementAndGet() == 2); + + // prepare the arguments for the offloadLoop method + CompletableFuture future = new CompletableFuture<>(); + Queue ledgersToOffload = new LinkedList<>(); + LedgerInfo ledgerInfo = LedgerInfo.getDefaultInstance().toBuilder().setLedgerId(1).setEntries(10).build(); + ledgersToOffload.add(ledgerInfo); + PositionImpl firstUnoffloaded = new PositionImpl(1, 0); + Optional firstError = Optional.empty(); + + // mock the read handle to make the offload successful + CompletableFuture readHandle = new CompletableFuture<>(); + readHandle.complete(mock(ReadHandle.class)); + doReturn(readHandle).when(ml).getLedgerHandle(eq(ledgerInfo.getLedgerId())); + when(ledgerOffloader.offload(any(), any(), anyMap())).thenReturn(CompletableFuture.completedFuture(null)); + + ml.ledgers.put(ledgerInfo.getLedgerId(), ledgerInfo); + + // do the offload + ml.offloadLoop(future, ledgersToOffload, firstUnoffloaded, firstError); + + // waiting for the offload complete + try { + future.join(); + fail("The offload should fail"); + } catch (Exception e) { + // the offload should fail + assertTrue(e.getCause().getMessage().contains("mock completion error")); + } + + // the ledger deletion shouldn't happen + verify(ledgerOffloader, times(0)) + .deleteOffloaded(eq(ledgerInfo.getLedgerId()), any(), anyMap()); + } } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java index 1ad3f5f8de631..1e1f7df0a46d5 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java +++ 
b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java @@ -80,7 +80,8 @@ void readFromEmptyLedger() throws Exception { entries.forEach(Entry::release); // Test string representation - assertEquals(c1.toString(), "NonDurableCursorImpl{ledger=my_test_ledger, ackPos=3:-1, readPos=3:1}"); + assertEquals(c1.toString(), "NonDurableCursorImpl{ledger=my_test_ledger, cursor=" + + c1.getName() + ", ackPos=3:-1, readPos=3:1}"); } @Test(timeOut = 20000) diff --git a/pom.xml b/pom.xml index 094a35b5ed13a..375087e4f6a54 100644 --- a/pom.xml +++ b/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 pom @@ -28,30 +27,24 @@ apache 29 - - org.apache.pulsar + io.streamnative pulsar - 3.0.0-SNAPSHOT - Pulsar Pulsar is a distributed pub-sub messaging platform with a very flexible messaging model and an intuitive client API. https://github.com/apache/pulsar - Apache Software Foundation https://www.apache.org/ 2017 - Apache Pulsar developers https://pulsar.apache.org/ - Apache License, Version 2.0 @@ -59,41 +52,38 @@ flexible messaging model and an intuitive client API. repo - - https://github.com/apache/pulsar - scm:git:https://github.com/apache/pulsar.git - scm:git:ssh://git@github.com:apache/pulsar.git + https://github.com/streamnative/pulsar + scm:git:https://github.com/streamnative/pulsar.git + scm:git:ssh://git@github.com:streamnative/pulsar.git - GitHub Actions https://github.com/apache/pulsar/actions - Github https://github.com/apache/pulsar/issues - 17 17 ${maven.compiler.target} 8 - - 2.10.1 - + 3.4.0 **/Test*.java,**/*Test.java,**/*Tests.java,**/*TestCase.java quarantine - UTF-8 UTF-8 - ${maven.build.timestamp} + 2024-03-04T21:08:12Z true + + --add-opens java.base/jdk.internal.loader=ALL-UNNAMED @@ -103,13 +93,14 @@ flexible messaging model and an intuitive client API. --add-opens java.base/sun.net=ALL-UNNAMED --add-opens java.management/sun.management=ALL-UNNAMED --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED + --add-opens java.base/jdk.internal.platform=ALL-UNNAMED true 4 false 1 true - + false ${project.build.directory} @@ -122,20 +113,20 @@ flexible messaging model and an intuitive client API. false package package - - 1.21 - - 4.16.0 - 3.8.1 + 1.26.0 + 4.16.4 + 3.9.1 1.5.0 1.10.0 - 1.1.8.4 - 4.1.12.1 + 1.1.10.5 + + 4.1.12.1 + 5.1.0 - 4.1.89.Final - 0.0.18.Final - 9.4.48.v20220622 + 4.1.100.Final + 0.0.21.Final + 9.4.53.v20231009 2.5.2 2.34 1.10.50 @@ -145,21 +136,23 @@ flexible messaging model and an intuitive client API. 1.7.32 4.4 2.18.0 - 1.69 + 1.75 1.0.6 - 1.0.2.3 - 2.13.4.20221013 + 1.0.2.4 + 2.14.2 0.10.2 - 1.6.2 + 1.6.10 8.37 - 0.40.2 + 0.43.3 true 0.5.0 + 1.14.12 + 1.17 3.19.6 ${protobuf3.version} - 1.45.1 + 1.55.3 1.41.0 - 0.19.0 + 0.26.0 ${grpc.version} 2.8.9 1.2.1 @@ -168,16 +161,16 @@ flexible messaging model and an intuitive client API. 3.11.2 4.4.20 3.4.0 - 5.5.3 + 5.18.0 1.12.262 - 1.10.2 + 1.11.3 2.10.10 2.5.0 5.1.0 - 3.36.0.3 + 3.42.0.0 8.0.11 42.5.1 - 0.3.2-patch11 + 0.4.6 2.7.5 0.4.4-hotfix1 3.3.5 @@ -195,7 +188,7 @@ flexible messaging model and an intuitive client API. 2.10.2 3.3.4 2.4.15 - 31.0.1-jre + 32.1.1-jre 1.0 0.16.1 6.2.8 @@ -215,8 +208,8 @@ flexible messaging model and an intuitive client API. 3.21.0 0.9.1 2.1.0 - 3.18.1 - 1.18.26 + 3.24.2 + 1.18.30 1.3.2 2.3.1 1.2.0 @@ -224,33 +217,31 @@ flexible messaging model and an intuitive client API. 
2.3.3 2.0.2 5.12.1 - 12.0.1 + 18.0.0 4.9.3 - 2.8.0 + 3.4.0 - 1.4.32 + 1.6.0 1.0 9.1.6 - 5.3.26 + 5.3.27 4.5.13 4.4.15 - 0.5.11 - 1.32 + 0.7.5 + 2.0 1.10.12 5.3.3 3.4.3 1.5.2-3 2.0.6 - - 1.17.6 + 1.18.3 2.2 - - 3.2.13 + 3.3.0 1.1.1 - 7.7.0 + 7.7.1 3.12.4 3.25.0-GA 1.5.0 @@ -260,28 +251,26 @@ flexible messaging model and an intuitive client API. 1.5.4 5.4.0 2.33.2 - 0.6.1 3.0.0 4.1 1.0 - 3.1.0 + 3.3.0 - - 3.0.0-M3 - 3.4.2 - 3.10.1 - 3.4.0 + 3.1.0 + 3.5.0 + 3.11.0 + 3.5.0 2.3.0 3.4.1 3.1.0 1.1.0 - 1.3.4 + 1.5.0 3.1.2 - 4.0.2 - 3.4.3 + 4.9.10 + 3.5.3 1.7.0 0.8.8 4.7.3.0 @@ -291,24 +280,20 @@ flexible messaging model and an intuitive client API. 0.1.4 1.3 0.4 - 8.1.2 - 0.9.15 + 9.0.7 + 0.9.44 1.6.1 6.4.0 - rename-netty-native-libs.sh - - org.jline jline ${jline3.version} - org.asynchttpclient async-http-client @@ -324,45 +309,39 @@ flexible messaging model and an intuitive client API. - org.testng testng ${testng.version} - - org.yaml - * - + + org.yaml + * + - org.hamcrest hamcrest ${hamcrest.version} test - org.awaitility awaitility ${awaitility.version} test - org.mockito mockito-core ${mockito.version} - org.mockito mockito-inline ${mockito.version} - org.apache.zookeeper zookeeper @@ -401,6 +380,12 @@ flexible messaging model and an intuitive client API. io.dropwizard.metrics metrics-graphite ${dropwizardmetrics.version} + + + com.rabbitmq + amqp-client + + io.dropwizard.metrics @@ -432,7 +417,6 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper bookkeeper-server @@ -461,13 +445,11 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper cpu-affinity ${bookkeeper.version} - io.vertx vertx-core @@ -478,38 +460,33 @@ flexible messaging model and an intuitive client API. vertx-web ${vertx.version} - - org.apache.curator - curator-recipes - ${curator.version} - - - org.apache.zookeeper - * - - + org.apache.curator + curator-recipes + ${curator.version} + + + org.apache.zookeeper + * + + - org.apache.bookkeeper bookkeeper-common-allocator ${bookkeeper.version} - org.apache.bookkeeper bookkeeper-tools-framework ${bookkeeper.version} - org.reflections reflections ${reflections.version} - org.apache.bookkeeper @@ -536,9 +513,12 @@ flexible messaging model and an intuitive client API. com.squareup.okio okio + + jose4j + org.bitbucket.b_c + - org.apache.bookkeeper @@ -571,19 +551,16 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper bookkeeper-common ${bookkeeper.version} - org.apache.bookkeeper.stats bookkeeper-stats-api ${bookkeeper.version} - org.apache.bookkeeper.stats datasketches-metrics-provider @@ -595,37 +572,31 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper.stats prometheus-metrics-provider ${bookkeeper.version} - org.rocksdb rocksdbjni ${rocksdb.version} - org.eclipse.jetty jetty-server ${jetty.version} - org.eclipse.jetty jetty-alpn-conscrypt-server ${jetty.version} - org.conscrypt conscrypt-openjdk-uber ${conscrypt.version} - org.eclipse.jetty jetty-bom @@ -633,7 +604,6 @@ flexible messaging model and an intuitive client API. pom import - io.netty netty-bom @@ -641,7 +611,6 @@ flexible messaging model and an intuitive client API. pom import - io.netty.incubator netty-incubator-transport-classes-io_uring @@ -664,79 +633,66 @@ flexible messaging model and an intuitive client API. 
${netty-iouring.version} linux-aarch_64 - com.beust jcommander ${jcommander.version} - com.google.guava guava ${guava.version} - com.google.inject guice ${guice.version} - com.google.inject.extensions guice-assistedinject ${guice.version} - org.apache.commons commons-lang3 ${commons-lang3.version} - org.apache.commons commons-compress ${commons-compress.version} - commons-configuration commons-configuration ${commons-configuration.version} - commons-io commons-io ${commons-io.version} - org.apache.commons commons-text ${commons-text.version} - org.slf4j slf4j-api ${slf4j.version} - org.slf4j slf4j-simple ${slf4j.version} - org.slf4j jcl-over-slf4j ${slf4j.version} - org.apache.logging.log4j log4j-bom @@ -744,49 +700,41 @@ flexible messaging model and an intuitive client API. pom import - commons-codec commons-codec ${commons-codec.version} - org.glassfish.jersey.core jersey-server ${jersey.version} - org.glassfish.jersey.core jersey-client ${jersey.version} - org.glassfish.jersey.inject jersey-hk2 ${jersey.version} - org.glassfish.jersey.containers jersey-container-servlet-core ${jersey.version} - org.glassfish.jersey.containers jersey-container-servlet ${jersey.version} - javax.ws.rs javax.ws.rs-api ${javax.ws.rs-api.version} - org.glassfish.jersey.media jersey-media-json-jackson @@ -798,23 +746,26 @@ flexible messaging model and an intuitive client API. - org.glassfish.jersey.media jersey-media-multipart ${jersey.version} - net.java.dev.jna jna ${jna.version} - - com.github.docker-java - docker-java-core - ${docker-java.version} + com.github.docker-java + docker-java-core + ${docker-java.version} + + + org.bouncycastle + * + + com.github.docker-java @@ -826,7 +777,6 @@ flexible messaging model and an intuitive client API. docker-java-transport-zerodep ${docker-java.version} - com.fasterxml.jackson jackson-bom @@ -834,56 +784,46 @@ flexible messaging model and an intuitive client API. pom import - org.codehaus.jettison jettison ${jettison.version} - com.fasterxml.woodstox woodstox-core ${woodstox.version} - - org.hdrhistogram HdrHistogram ${hdrHistogram.version} - io.swagger swagger-core ${swagger.version} - io.swagger swagger-annotations ${swagger.version} - javax.servlet javax.servlet-api ${javax.servlet-api} - com.github.ben-manes.caffeine caffeine ${caffeine.version} - org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on ${bouncycastle.version} - com.cronutils cron-utils @@ -895,85 +835,88 @@ flexible messaging model and an intuitive client API. - com.yahoo.athenz athenz-zts-java-client-core ${athenz.version} - com.yahoo.athenz athenz-zpe-java-client ${athenz.version} - com.yahoo.athenz athenz-cert-refresher ${athenz.version} + + + org.bouncycastle + * + + + + + com.yahoo.athenz + athenz-auth-core + ${athenz.version} + + + org.bouncycastle + * + + - com.github.zafarkhaja java-semver ${java-semver.version} - io.prometheus simpleclient ${prometheus.version} - io.prometheus simpleclient_hotspot ${prometheus.version} - io.prometheus simpleclient_log4j2 ${prometheus.version} - io.prometheus simpleclient_servlet ${prometheus.version} - io.prometheus simpleclient_jetty ${prometheus.version} - io.prometheus simpleclient_caffeine ${prometheus.version} - com.carrotsearch hppc ${hppc.version} - io.etcd jetcd-core ${jetcd.version} - io.etcd jetcd-test ${jetcd.version} - org.apache.spark spark-streaming_2.10 @@ -1001,7 +944,6 @@ flexible messaging model and an intuitive client API. - io.jsonwebtoken jjwt-api @@ -1017,13 +959,21 @@ flexible messaging model and an intuitive client API. 
jjwt-jackson ${jsonwebtoken.version} - net.jodah typetools ${typetools.version} - + + net.bytebuddy + byte-buddy + ${byte-buddy.version} + + + org.zeroturnaround + zt-zip + ${zt-zip.version} + io.grpc grpc-bom @@ -1031,7 +981,6 @@ flexible messaging model and an intuitive client API. pom import - io.grpc grpc-all @@ -1055,25 +1004,32 @@ flexible messaging model and an intuitive client API. - + + io.grpc + grpc-xds + ${grpc.version} + + + org.bouncycastle + * + + + com.google.http-client google-http-client ${google-http-client.version} - com.google.http-client google-http-client-jackson2 ${google-http-client.version} - com.google.http-client google-http-client-gson ${google-http-client.version} - io.perfmark perfmark-api @@ -1086,7 +1042,6 @@ flexible messaging model and an intuitive client API. - com.google.protobuf protobuf-bom @@ -1094,19 +1049,16 @@ flexible messaging model and an intuitive client API. pom import - com.google.code.gson gson ${gson.version} - com.yahoo.datasketches sketches-core ${sketches.version} - com.amazonaws aws-java-sdk-bom @@ -1114,7 +1066,6 @@ flexible messaging model and an intuitive client API. pom import - org.apache.distributedlog distributedlog-core @@ -1127,13 +1078,11 @@ flexible messaging model and an intuitive client API. - org.apache.commons commons-collections4 ${commons.collections4.version} - com.lmax @@ -1157,103 +1106,86 @@ flexible messaging model and an intuitive client API. assertj-core ${assertj-core.version} - org.projectlombok lombok ${lombok.version} - javax.annotation javax.annotation-api ${javax.annotation-api.version} - javax.xml.bind jaxb-api ${jaxb-api} - jakarta.xml.bind jakarta.xml.bind-api ${jakarta.xml.bind.version} - com.sun.activation javax.activation ${javax.activation.version} - com.sun.activation jakarta.activation ${jakarta.activation.version} - jakarta.activation jakarta.activation-api ${jakarta.activation.version} - jakarta.validation jakarta.validation-api ${jakarta.validation.version} - io.opencensus opencensus-api ${opencensus.version} - io.opencensus opencensus-contrib-http-util ${opencensus.version} - io.opencensus opencensus-contrib-grpc-metrics ${opencensus.version} - org.opensearch.client opensearch-rest-high-level-client ${opensearch.version} - co.elastic.clients elasticsearch-java ${elasticsearch-java.version} - joda-time joda-time ${joda.version} - org.javassist javassist ${javassist.version} - net.jcip jcip-annotations ${jcip.version} - io.airlift aircompressor @@ -1265,25 +1197,21 @@ flexible messaging model and an intuitive client API. - org.objenesis objenesis ${objenesis.version} - org.apache.httpcomponents httpclient ${apache-http-client.version} - org.apache.httpcomponents httpcore ${apache-httpcomponents.version} - com.github.spotbugs spotbugs-annotations @@ -1291,31 +1219,26 @@ flexible messaging model and an intuitive client API. provided true - com.google.errorprone error_prone_annotations ${errorprone.version} - com.google.j2objc j2objc-annotations ${j2objc-annotations.version} - org.yaml snakeyaml ${snakeyaml.version} - org.apache.ant ant ${ant.version} - com.squareup.okhttp3 okhttp @@ -1336,7 +1259,6 @@ flexible messaging model and an intuitive client API. okio ${okio.version} - org.jetbrains.kotlin kotlin-stdlib @@ -1347,31 +1269,26 @@ flexible messaging model and an intuitive client API. 
kotlin-stdlib-common ${kotlin-stdlib.version} - org.jetbrains.kotlin kotlin-stdlib-jdk8 ${kotlin-stdlib.version} - com.github.luben zstd-jni ${zstd-jni.version} - - com.typesafe.netty - netty-reactive-streams - ${netty-reactive-streams.version} + com.typesafe.netty + netty-reactive-streams + ${netty-reactive-streams.version} - ch.qos.reload4j reload4j ${reload4j.version} - org.roaringbitmap RoaringBitmap @@ -1384,47 +1301,40 @@ flexible messaging model and an intuitive client API. - - org.apache.pulsar + io.streamnative buildtools ${project.version} test - org.testng testng test - org.mockito mockito-core test - org.mockito mockito-inline test - com.github.stefanbirkner system-lambda ${system-lambda.version} test - org.assertj assertj-core test - org.projectlombok lombok @@ -1435,7 +1345,6 @@ flexible messaging model and an intuitive client API. javax.annotation-api provided - org.apache.bookkeeper @@ -1445,8 +1354,8 @@ flexible messaging model and an intuitive client API. tests - org.bouncycastle - * + org.bouncycastle + * org.slf4j @@ -1466,8 +1375,26 @@ flexible messaging model and an intuitive client API. + + + org.apache.bookkeeper + bookkeeper-common + ${bookkeeper.version} + test + tests + + + com.fasterxml.jackson.core + * + + + + + org.apache.logging.log4j + log4j-layout-template-json + 2.20.0 + - ${project.artifactId} @@ -1482,7 +1409,6 @@ flexible messaging model and an intuitive client API. UTF-8 true true - true false @@ -1535,8 +1461,14 @@ flexible messaging model and an intuitive client API. + + + org.apache.maven.surefire + surefire-testng + ${surefire.version} + + - org.commonjava.maven.plugins directory-maven-plugin @@ -1551,14 +1483,37 @@ flexible messaging model and an intuitive client API. pulsar.basedir - org.apache.pulsar + io.streamnative pulsar - + + pl.project13.maven + git-commit-id-plugin + ${git-commit-id-plugin.version} + + + git-info + + revision + + + + + true + true + git + false + false + false + + true + + + com.mycila license-maven-plugin @@ -1597,7 +1552,7 @@ flexible messaging model and an intuitive client API. **/ByteBufCodedOutputStream.java **/ahc.properties bin/proto/* - conf/schema_example.conf + conf/schema_example.json data/** logs/** **/circe/** @@ -1659,33 +1614,26 @@ flexible messaging model and an intuitive client API. src/assemble/README.bin.txt src/assemble/LICENSE.bin.txt src/assemble/NOTICE.bin.txt - src/main/java/org/apache/bookkeeper/mledger/proto/MLDataFormats.java src/main/java/org/apache/pulsar/broker/service/schema/proto/SchemaRegistryFormat.java bin/proto/MLDataFormats_pb2.py - **/avro/generated/*.java - **/*.avsc - src/main/java/org/apache/pulsar/io/kinesis/fbs/CompressionType.java src/main/java/org/apache/pulsar/io/kinesis/fbs/EncryptionCtx.java src/main/java/org/apache/pulsar/io/kinesis/fbs/EncryptionKey.java src/main/java/org/apache/pulsar/io/kinesis/fbs/KeyValue.java src/main/java/org/apache/pulsar/io/kinesis/fbs/Message.java - src/main/java/org/apache/bookkeeper/mledger/util/AbstractCASReferenceCounted.java - dependency-reduced-pom.xml - pulsar-client-go/go.mod pulsar-client-go/go.sum @@ -1693,15 +1641,12 @@ flexible messaging model and an intuitive client API. pulsar-function-go/go.sum pulsar-function-go/examples/go.mod pulsar-function-go/examples/go.sum - **/META-INF/services/com.scurrilous.circe.HashProvider **/META-INF/services/io.trino.spi.Plugin - **/django/stats/migrations/*.py **/conf/uwsgi_params - **/*.crt **/*.key @@ -1714,21 +1659,16 @@ flexible messaging model and an intuitive client API. 
certificate-authority/index.txt certificate-authority/serial certificate-authority/README.md - **/zk-3.5-test-data/* - **/requirements.txt - - conf/schema_example.conf + conf/schema_example.json **/templates/*.tpl - **/.helmignore **/_helpers.tpl - **/*.md .github/** @@ -1757,6 +1697,7 @@ flexible messaging model and an intuitive client API. **/*.so.* **/*.dylib src/test/resources/*.txt + **/dependency-reduced-pom.xml @@ -1765,55 +1706,106 @@ flexible messaging model and an intuitive client API. maven-enforcer-plugin ${maven-enforcer-plugin.version} - - enforce-maven - - enforce - - - - - 17 - Java 17+ is required to build Pulsar. - - - 3.6.1 - - - - + + enforce-maven + + enforce + + + + + 17 + Java 17+ is required to build Pulsar. + + + 3.6.1 + + + + - org.apache.maven.plugins - maven-assembly-plugin - ${maven-assembly-plugin.version} - false - - - source-release-assembly-tar-gz - generate-sources - - single - - - ${skipSourceReleaseAssembly} - true - - src/assembly-source-package.xml - - apache-pulsar-${project.version}-src - false - - tar.gz - - posix - - - + org.apache.maven.plugins + maven-assembly-plugin + ${maven-assembly-plugin.version} + false + + + source-release-assembly-tar-gz + generate-sources + + single + + + ${skipSourceReleaseAssembly} + true + + src/assembly-source-package.xml + + apache-pulsar-${project.version}-src + false + + tar.gz + + posix + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + true + + ossrh + https://s01.oss.sonatype.org/ + 60285aee9d4161 + true + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + attach-javadocs + + jar + + + none + false + + + + + + org.apache.maven.plugins + maven-release-plugin + 3.0.0-M1 + + true + false + release + deploy + - @@ -1834,10 +1826,10 @@ flexible messaging model and an intuitive client API. maven-surefire-plugin - ${include} + ${include} - **/*$*,${exclude} + **/*$*,${exclude} ${groups} ${excludedGroups} @@ -1921,13 +1913,13 @@ flexible messaging model and an intuitive client API. com.github.spotbugs spotbugs-maven-plugin ${spotbugs-maven-plugin.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + org.codehaus.mojo @@ -1939,6 +1931,16 @@ flexible messaging model and an intuitive client API. docker-maven-plugin ${docker-maven.version} + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + @@ -1954,7 +1956,6 @@ flexible messaging model and an intuitive client API. - @@ -2063,7 +2064,6 @@ flexible messaging model and an intuitive client API. tests - contrib-check - - - - org.apache.rat - apache-rat-plugin - - - - check - - verify - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - check-style - verify - - ${pulsar.basedir}/buildtools/src/main/resources/pulsar/checkstyle.xml - ${pulsar.basedir}/buildtools/src/main/resources/pulsar/suppressions.xml - UTF-8 - **/proto/* - - - check - - - - - - + + + + org.apache.rat + apache-rat-plugin + + + + check + + verify + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + check-style + verify + + ${pulsar.basedir}/buildtools/src/main/resources/pulsar/checkstyle.xml + ${pulsar.basedir}/buildtools/src/main/resources/pulsar/suppressions.xml + UTF-8 + **/proto/* + + + check + + + + + + - windows @@ -2119,7 +2118,6 @@ flexible messaging model and an intuitive client API. 
rename-netty-native-libs.cmd - @@ -2163,37 +2161,27 @@ flexible messaging model and an intuitive client API. pulsar-broker-auth-sasl pulsar-client-auth-sasl pulsar-config-validation - structured-event-log - pulsar-transaction - pulsar-functions - pulsar-io - bouncy-castle - pulsar-client-messagecrypto-bc - pulsar-metadata jclouds-shaded - pulsar-package-management - distribution docker tests - core-modules @@ -2222,31 +2210,23 @@ flexible messaging model and an intuitive client API. pulsar-broker-auth-sasl pulsar-client-auth-sasl pulsar-config-validation - pulsar-transaction - pulsar-functions - pulsar-io - bouncy-castle - pulsar-client-messagecrypto-bc - distribution pulsar-metadata - pulsar-package-management - - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-broker-auth-athenz jar Athenz authentication plugin for broker - - ${project.groupId} pulsar-broker ${project.version} - ${project.groupId} testmocks ${project.version} test - com.yahoo.athenz athenz-zpe-java-client - + + org.bouncycastle + bcpkix-jdk18on + - @@ -74,7 +69,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -92,7 +86,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -106,7 +99,6 @@ - diff --git a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java index 2e062b87a8325..652a922b9a5ad 100644 --- a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java +++ b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java @@ -43,6 +43,15 @@ public class AuthenticationProviderAthenz implements AuthenticationProvider { private List domainNameList = null; private int allowedOffset = 30; + public enum ErrorCode { + UNKNOWN, + NO_CLIENT, + NO_TOKEN, + NO_PUBLIC_KEY, + DOMAIN_MISMATCH, + INVALID_TOKEN, + } + @Override public void initialize(ServiceConfiguration config) throws IOException { String domainNames; @@ -81,11 +90,13 @@ public String getAuthMethodName() { public String authenticate(AuthenticationDataSource authData) throws AuthenticationException { SocketAddress clientAddress; String roleToken; + ErrorCode errorCode = ErrorCode.UNKNOWN; try { if (authData.hasDataFromPeer()) { clientAddress = authData.getPeerAddress(); } else { + errorCode = ErrorCode.NO_CLIENT; throw new AuthenticationException("Authentication data source does not have a client address"); } @@ -94,13 +105,16 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat } else if (authData.hasDataFromHttp()) { roleToken = authData.getHttpHeader(AuthZpeClient.ZPE_TOKEN_HDR); } else { + errorCode = ErrorCode.NO_TOKEN; throw new AuthenticationException("Authentication data source does not have a role token"); } if (roleToken == null) { + errorCode = ErrorCode.NO_TOKEN; throw new AuthenticationException("Athenz token is null, can't authenticate"); } if (roleToken.isEmpty()) { + errorCode = ErrorCode.NO_TOKEN; throw new AuthenticationException("Athenz RoleToken is empty, Server is Using Athenz Authentication"); } if (log.isDebugEnabled()) { @@ -110,6 +124,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat RoleToken token = new RoleToken(roleToken); if (!domainNameList.contains(token.getDomain())) { + errorCode = ErrorCode.DOMAIN_MISMATCH; throw new AuthenticationException( 
String.format("Athenz RoleToken Domain mismatch, Expected: %s, Found: %s", domainNameList.toString(), token.getDomain())); @@ -120,6 +135,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat PublicKey ztsPublicKey = AuthZpeClient.getZtsPublicKey(token.getKeyId()); if (ztsPublicKey == null) { + errorCode = ErrorCode.NO_PUBLIC_KEY; throw new AuthenticationException("Unable to retrieve ZTS Public Key"); } @@ -128,13 +144,13 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); return token.getPrincipal(); } else { + errorCode = ErrorCode.INVALID_TOKEN; throw new AuthenticationException( String.format("Athenz Role Token Not Authenticated from Client: %s", clientAddress)); } } } catch (AuthenticationException exception) { - AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), - exception.getMessage()); + incrementFailureMetric(errorCode); throw exception; } } diff --git a/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml b/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml +++ b/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-broker-auth-oidc/pom.xml b/pulsar-broker-auth-oidc/pom.xml index 9ad0363775a13..0e2da4aa0861d 100644 --- a/pulsar-broker-auth-oidc/pom.xml +++ b/pulsar-broker-auth-oidc/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-broker-auth-oidc jar Open ID Connect authentication plugin for broker - 0.11.5 - - ${project.groupId} pulsar-broker-common @@ -50,29 +44,24 @@ - com.auth0 java-jwt 4.3.0 - com.auth0 jwks-rsa 0.22.0 - com.github.ben-manes.caffeine caffeine - org.asynchttpclient async-http-client - io.kubernetes client-java @@ -83,9 +72,20 @@ io.prometheus simpleclient_httpserver + + bcpkix-jdk18on + org.bouncycastle + + + bcutil-jdk18on + org.bouncycastle + + + bcprov-jdk18on + org.bouncycastle + - io.jsonwebtoken jjwt-api @@ -98,16 +98,13 @@ ${jsonwebtoken.version} test - com.github.tomakehurst wiremock-jre8 ${wiremock.version} test - - @@ -129,8 +126,6 @@ - - @@ -150,7 +145,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java index ae4774b6f6b6b..1462b8e293f79 100644 --- a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java +++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java @@ -52,6 +52,7 @@ import java.util.concurrent.CompletableFuture; import javax.naming.AuthenticationException; import javax.net.ssl.SSLSession; +import org.apache.commons.lang.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProvider; @@ -133,6 +134,8 @@ public class AuthenticationProviderOpenID implements AuthenticationProvider { static final int CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT = 18 * 60 * 60; 
static final String CACHE_EXPIRATION_SECONDS = "openIDCacheExpirationSeconds"; static final int CACHE_EXPIRATION_SECONDS_DEFAULT = 24 * 60 * 60; + static final String KEY_ID_CACHE_MISS_REFRESH_SECONDS = "openIDKeyIdCacheMissRefreshSeconds"; + static final int KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT = 5 * 60; static final String HTTP_CONNECTION_TIMEOUT_MILLIS = "openIDHttpConnectionTimeoutMillis"; static final int HTTP_CONNECTION_TIMEOUT_MILLIS_DEFAULT = 10_000; static final String HTTP_READ_TIMEOUT_MILLIS = "openIDHttpReadTimeoutMillis"; @@ -161,7 +164,9 @@ public void initialize(ServiceConfiguration config) throws IOException { int readTimeout = getConfigValueAsInt(config, HTTP_READ_TIMEOUT_MILLIS, HTTP_READ_TIMEOUT_MILLIS_DEFAULT); String trustCertsFilePath = getConfigValueAsString(config, ISSUER_TRUST_CERTS_FILE_PATH, null); SslContext sslContext = null; - if (trustCertsFilePath != null) { + // When config is in the conf file but is empty, it defaults to the empty string, which is not meaningful and + // should be ignored. + if (StringUtils.isNotBlank(trustCertsFilePath)) { // Use default settings for everything but the trust store. sslContext = SslContextBuilder.forClient() .trustManager(new File(trustCertsFilePath)) @@ -447,7 +452,7 @@ DecodedJWT verifyJWT(PublicKey publicKey, } static void incrementFailureMetric(AuthenticationExceptionCode code) { - AuthenticationMetrics.authenticateFailure(SIMPLE_NAME, "token", code.toString()); + AuthenticationMetrics.authenticateFailure(SIMPLE_NAME, AUTH_METHOD_NAME, code); } /** diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java index 12ea7ec6b906d..73934e9c1e05e 100644 --- a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java +++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java @@ -24,6 +24,8 @@ import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT; import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE; import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE_DEFAULT; +import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS; +import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT; import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.incrementFailureMetric; import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsInt; import com.auth0.jwk.Jwk; @@ -43,6 +45,7 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import javax.naming.AuthenticationException; import org.apache.pulsar.broker.ServiceConfiguration; @@ -52,7 +55,8 @@ public class JwksCache { // Map from an issuer's JWKS URI to its JWKS. When the Issuer is not empty, use the fallback client. 
private final AsyncLoadingCache, List> cache; - + private final ConcurrentHashMap, Long> jwksLastRefreshTime = new ConcurrentHashMap<>(); + private final long keyIdCacheMissRefreshNanos; private final ObjectReader reader = new ObjectMapper().readerFor(HashMap.class); private final AsyncHttpClient httpClient; private final OpenidApi openidApi; @@ -61,7 +65,8 @@ public class JwksCache { // Store the clients this.httpClient = httpClient; this.openidApi = apiClient != null ? new OpenidApi(apiClient) : null; - + keyIdCacheMissRefreshNanos = TimeUnit.SECONDS.toNanos(getConfigValueAsInt(config, + KEY_ID_CACHE_MISS_REFRESH_SECONDS, KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT)); // Configure the cache int maxSize = getConfigValueAsInt(config, CACHE_SIZE, CACHE_SIZE_DEFAULT); int refreshAfterWriteSeconds = getConfigValueAsInt(config, CACHE_REFRESH_AFTER_WRITE_SECONDS, @@ -69,6 +74,8 @@ public class JwksCache { int expireAfterSeconds = getConfigValueAsInt(config, CACHE_EXPIRATION_SECONDS, CACHE_EXPIRATION_SECONDS_DEFAULT); AsyncCacheLoader, List> loader = (jwksUri, executor) -> { + // Store the time of the retrieval, even though it might be a little early or the call might fail. + jwksLastRefreshTime.put(jwksUri, System.nanoTime()); if (jwksUri.isPresent()) { return getJwksFromJwksUri(jwksUri.get()); } else { @@ -87,7 +94,37 @@ CompletableFuture getJwk(String jwksUri, String keyId) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY); return CompletableFuture.failedFuture(new IllegalArgumentException("jwksUri must not be null.")); } - return cache.get(Optional.of(jwksUri)).thenApply(jwks -> getJwkForKID(jwks, keyId)); + return getJwkAndMaybeReload(Optional.of(jwksUri), keyId, false); + } + + /** + * Retrieve the JWK for the given key ID from the given JWKS URI. If the key ID is not found, and failOnMissingKeyId + * is false, then the JWK will be reloaded from the JWKS URI and the key ID will be searched for again. + */ + private CompletableFuture getJwkAndMaybeReload(Optional maybeJwksUri, + String keyId, + boolean failOnMissingKeyId) { + return cache + .get(maybeJwksUri) + .thenCompose(jwks -> { + try { + return CompletableFuture.completedFuture(getJwkForKID(maybeJwksUri, jwks, keyId)); + } catch (IllegalArgumentException e) { + if (failOnMissingKeyId) { + throw e; + } else { + Long lastRefresh = jwksLastRefreshTime.get(maybeJwksUri); + if (lastRefresh == null || System.nanoTime() - lastRefresh > keyIdCacheMissRefreshNanos) { + // In this case, the key ID was not found, but we haven't refreshed the JWKS in a while, + // so it is possible the key ID was added. Refresh the JWKS and try again. + cache.synchronous().invalidate(maybeJwksUri); + } + // There is a small race condition where the JWKS could be refreshed by another thread, + // so we retry getting the JWK, even though we might not have invalidated the cache. 
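The getJwkAndMaybeReload logic in this hunk bounds how often an unknown key ID may trigger a fresh JWKS fetch: the fetch time is recorded per URI, the cache entry is invalidated only when that timestamp is older than openIDKeyIdCacheMissRefreshSeconds, and the lookup is retried exactly once. A minimal, self-contained sketch of the same idea; the class and method names here are illustrative rather than the Pulsar ones, and Caffeine's AsyncLoadingCache is the only real dependency:

    import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
    import com.github.benmanes.caffeine.cache.Caffeine;

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.TimeUnit;

    class KeySetCacheSketch {
        private final long missRefreshNanos = TimeUnit.SECONDS.toNanos(300);
        private final Map<String, Long> lastRefresh = new ConcurrentHashMap<>();
        private final AsyncLoadingCache<String, List<String>> cache = Caffeine.newBuilder()
                .maximumSize(10)
                .buildAsync((uri, executor) -> {
                    // Record the refresh time up front, even if the remote fetch later fails.
                    lastRefresh.put(uri, System.nanoTime());
                    return loadKeys(uri);
                });

        CompletableFuture<String> getKey(String uri, String keyId) {
            return get(uri, keyId, /* failOnMiss */ false);
        }

        private CompletableFuture<String> get(String uri, String keyId, boolean failOnMiss) {
            return cache.get(uri).thenCompose(keys -> {
                if (keys.contains(keyId)) {
                    return CompletableFuture.completedFuture(keyId);
                }
                if (failOnMiss) {
                    return CompletableFuture.failedFuture(
                            new IllegalArgumentException("No key found for ID " + keyId));
                }
                Long last = lastRefresh.get(uri);
                if (last == null || System.nanoTime() - last > missRefreshNanos) {
                    // Stale enough: drop the entry so the next get() reloads from the source.
                    cache.synchronous().invalidate(uri);
                }
                // Retry exactly once; a second miss fails the lookup.
                return get(uri, keyId, true);
            });
        }

        private static CompletableFuture<List<String>> loadKeys(String uri) {
            // Stand-in for the real JWKS fetch.
            return CompletableFuture.completedFuture(List.of("key-a", "key-b"));
        }
    }

The single retry lets authentication succeed right after an issuer rotates its signing keys, while still capping how often a token carrying an unknown kid can force a refetch.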
+ return getJwkAndMaybeReload(maybeJwksUri, keyId, true); + } + } + }); } private CompletableFuture> getJwksFromJwksUri(String jwksUri) { @@ -119,8 +156,7 @@ CompletableFuture getJwkFromKubernetesApiServer(String keyId) { return CompletableFuture.failedFuture(new AuthenticationException( "Failed to retrieve public key from Kubernetes API server: Kubernetes fallback is not enabled.")); } - return cache.get(Optional.empty(), (__, executor) -> getJwksFromKubernetesApiServer()) - .thenApply(jwks -> getJwkForKID(jwks, keyId)); + return getJwkAndMaybeReload(Optional.empty(), keyId, false); } private CompletableFuture> getJwksFromKubernetesApiServer() { @@ -130,9 +166,10 @@ private CompletableFuture> getJwksFromKubernetesApiServer() { @Override public void onFailure(ApiException e, int statusCode, Map> responseHeaders) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY); + // We want the message and responseBody here: https://github.com/kubernetes-client/java/issues/2066. future.completeExceptionally( - new AuthenticationException("Failed to retrieve public key from Kubernetes API server: " - + e.getMessage())); + new AuthenticationException("Failed to retrieve public key from Kubernetes API server. " + + "Message: " + e.getMessage() + " Response body: " + e.getResponseBody())); } @Override @@ -169,7 +206,7 @@ public void onDownloadProgress(long bytesRead, long contentLength, boolean done) return future; } - private Jwk getJwkForKID(List jwks, String keyId) { + private Jwk getJwkForKID(Optional maybeJwksUri, List jwks, String keyId) { for (Jwk jwk : jwks) { if (jwk.getId().equals(keyId)) { return jwk; diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/OpenIDProviderMetadataCache.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/OpenIDProviderMetadataCache.java index 33d11f35a349a..111399adbd72b 100644 --- a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/OpenIDProviderMetadataCache.java +++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/OpenIDProviderMetadataCache.java @@ -165,9 +165,10 @@ private CompletableFuture loadOpenIDProviderMetadataForK @Override public void onFailure(ApiException e, int statusCode, Map> responseHeaders) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PROVIDER_METADATA); + // We want the message and responseBody here: https://github.com/kubernetes-client/java/issues/2066. future.completeExceptionally(new AuthenticationException( - "Error retrieving OpenID Provider Metadata from Kubernetes API server: " - + e.getMessage())); + "Error retrieving OpenID Provider Metadata from Kubernetes API server. 
Message: " + + e.getMessage() + " Response body: " + e.getResponseBody())); } @Override diff --git a/pulsar-broker-auth-oidc/src/test/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenIDIntegrationTest.java b/pulsar-broker-auth-oidc/src/test/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenIDIntegrationTest.java index 298492652c0a8..d2d2de1a1149d 100644 --- a/pulsar-broker-auth-oidc/src/test/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenIDIntegrationTest.java +++ b/pulsar-broker-auth-oidc/src/test/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenIDIntegrationTest.java @@ -29,8 +29,10 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.stubbing.Scenario; import io.jsonwebtoken.SignatureAlgorithm; import io.jsonwebtoken.impl.DefaultJwtBuilder; +import io.jsonwebtoken.io.Decoders; import io.jsonwebtoken.security.Keys; import java.io.IOException; import java.nio.file.Files; @@ -41,16 +43,23 @@ import java.util.Base64; import java.util.Date; import java.util.HashMap; +import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutionException; import javax.naming.AuthenticationException; +import lombok.Cleanup; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataCommand; +import org.apache.pulsar.broker.authentication.AuthenticationProvider; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.broker.authentication.AuthenticationState; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; import org.apache.pulsar.common.api.AuthData; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; /** @@ -68,6 +77,7 @@ public class AuthenticationProviderOpenIDIntegrationTest { // The valid issuer String issuer; String issuerWithTrailingSlash; + String issuerWithMissingKid; // This issuer is configured to return an issuer in the openid-configuration // that does not match the issuer on the token String issuerThatFails; @@ -83,6 +93,7 @@ void beforeClass() throws IOException { server.start(); issuer = server.baseUrl(); issuerWithTrailingSlash = issuer + "/trailing-slash/"; + issuerWithMissingKid = issuer + "/missing-kid"; issuerThatFails = issuer + "/fail"; issuerK8s = issuer + "/k8s"; @@ -176,6 +187,50 @@ void beforeClass() throws IOException { } """.formatted(validJwk, n, e, invalidJwk)))); + server.stubFor( + get(urlEqualTo("/missing-kid/.well-known/openid-configuration")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(""" + { + "issuer": "%s", + "jwks_uri": "%s/keys" + } + """.formatted(issuerWithMissingKid, issuerWithMissingKid)))); + + // Set up JWKS endpoint where it first responds without the KID, then with the KID. This is a stateful stub. + // Note that the state machine is circular to make it easier to verify the two code paths that rely on + // this logic. 
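The tests below mint their JWTs with a generateToken(...) helper whose body is outside this hunk. For orientation, a hedged sketch of what such a helper usually looks like with the jjwt builder this test class already imports; the parameter list and claim layout are assumptions, not the file's actual implementation:

    import io.jsonwebtoken.Jwts;
    import io.jsonwebtoken.SignatureAlgorithm;

    import java.security.PrivateKey;
    import java.util.Date;

    class TokenSketch {
        static String generateToken(PrivateKey key, String kid, String issuer, String role,
                                    String audience, long ttlMillis) {
            long now = System.currentTimeMillis();
            return Jwts.builder()
                    .setHeaderParam("kid", kid)   // must match a kid served by the stubbed JWKS endpoint
                    .setIssuer(issuer)
                    .setSubject(role)
                    .setAudience(audience)
                    .setIssuedAt(new Date(now))
                    .setExpiration(new Date(now + ttlMillis))
                    .signWith(key, SignatureAlgorithm.RS256)
                    .compact();
        }
    }

The signing key has to be the private half of the JWK published by the stub, otherwise signature verification fails regardless of the cache behaviour under test.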
+ server.stubFor( + get(urlMatching( "/missing-kid/keys")) + .inScenario("Changing KIDs") + .whenScenarioStateIs(Scenario.STARTED) + .willSetStateTo("serve-kid") + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody("{\"keys\":[]}"))); + server.stubFor( + get(urlMatching( "/missing-kid/keys")) + .inScenario("Changing KIDs") + .whenScenarioStateIs("serve-kid") + .willSetStateTo(Scenario.STARTED) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody( + """ + { + "keys" : [ + { + "kid":"%s", + "kty":"RSA", + "alg":"RS256", + "n":"%s", + "e":"%s" + } + ] + } + """.formatted(validJwk, n, e)))); + ServiceConfiguration conf = new ServiceConfiguration(); conf.setAuthenticationEnabled(true); conf.setAuthenticationProviders(Set.of(AuthenticationProviderOpenID.class.getName())); @@ -200,6 +255,12 @@ void afterClass() { server.stop(); } + @BeforeMethod + public void beforeMethod() { + // Scenarios are stateful. Start each test with the correct state. + server.resetScenarios(); + } + @Test public void testTokenWithValidJWK() throws Exception { String role = "superuser"; @@ -261,6 +322,52 @@ public void testTokenWithInvalidIssuer() throws Exception { assertTrue(e.getCause() instanceof AuthenticationException, "Found exception: " + e.getCause()); } } + @Test + public void testKidCacheMissWhenRefreshConfigZero() throws Exception { + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setAuthenticationEnabled(true); + conf.setAuthenticationProviders(Set.of(AuthenticationProviderOpenID.class.getName())); + Properties props = conf.getProperties(); + props.setProperty(AuthenticationProviderOpenID.REQUIRE_HTTPS, "false"); + // Allows us to retrieve the JWK immediately after the cache miss of the KID + props.setProperty(AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS, "0"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_AUDIENCES, "allowed-audience"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_TOKEN_ISSUERS, issuerWithMissingKid); + + AuthenticationProviderOpenID provider = new AuthenticationProviderOpenID(); + provider.initialize(conf); + + String role = "superuser"; + String token = generateToken(validJwk, issuerWithMissingKid, role, "allowed-audience", 0L, 0L, 10000L); + assertEquals(role, provider.authenticateAsync(new AuthenticationDataCommand(token)).get()); + } + + @Test + public void testKidCacheMissWhenRefreshConfigLongerThanDelta() throws Exception { + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setAuthenticationEnabled(true); + conf.setAuthenticationProviders(Set.of(AuthenticationProviderOpenID.class.getName())); + Properties props = conf.getProperties(); + props.setProperty(AuthenticationProviderOpenID.REQUIRE_HTTPS, "false"); + // This value is high enough that the provider will not refresh the JWK + props.setProperty(AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS, "100"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_AUDIENCES, "allowed-audience"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_TOKEN_ISSUERS, issuerWithMissingKid); + + AuthenticationProviderOpenID provider = new AuthenticationProviderOpenID(); + provider.initialize(conf); + + String role = "superuser"; + String token = generateToken(validJwk, issuerWithMissingKid, role, "allowed-audience", 0L, 0L, 10000L); + try { + provider.authenticateAsync(new AuthenticationDataCommand(token)).get(); + fail("Expected exception"); + } catch (ExecutionException e) { + 
assertTrue(e.getCause() instanceof IllegalArgumentException, "Found exception: " + e.getCause()); + assertTrue(e.getCause().getMessage().contains("No JWK found for Key ID valid"), + "Found exception: " + e.getCause()); + } + } @Test public void testKubernetesApiServerAsDiscoverTrustedIssuerSuccess() throws Exception { @@ -438,6 +545,56 @@ public void testAuthenticationStateOpenIDForTokenExpiration() throws Exception { assertTrue(state.isExpired()); } + /** + * This test covers the migration scenario where you have both the Token and OpenID providers. It ensures + * both kinds of authentication work. + * @throws Exception + */ + @Test + public void testAuthenticationProviderListStateSuccess() throws Exception { + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setAuthenticationEnabled(true); + conf.setAuthenticationProviders(Set.of(AuthenticationProviderOpenID.class.getName(), + AuthenticationProviderToken.class.getName())); + Properties props = conf.getProperties(); + props.setProperty(AuthenticationProviderOpenID.REQUIRE_HTTPS, "false"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_AUDIENCES, "allowed-audience"); + props.setProperty(AuthenticationProviderOpenID.ALLOWED_TOKEN_ISSUERS, issuer); + + // Set up static token + KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256); + // Use public key for validation + String publicKeyStr = AuthTokenUtils.encodeKeyBase64(keyPair.getPublic()); + props.setProperty("tokenPublicKey", publicKeyStr); + // Use private key to generate token + String privateKeyStr = AuthTokenUtils.encodeKeyBase64(keyPair.getPrivate()); + PrivateKey privateKey = AuthTokenUtils.decodePrivateKey(Decoders.BASE64.decode(privateKeyStr), + SignatureAlgorithm.RS256); + String staticToken = AuthTokenUtils.createToken(privateKey, "superuser", Optional.empty()); + + @Cleanup + AuthenticationService service = new AuthenticationService(conf); + AuthenticationProvider provider = service.getAuthenticationProvider("token"); + + // First, authenticate using OIDC + String role = "superuser"; + String oidcToken = generateToken(validJwk, issuer, role, "allowed-audience", 0L, 0L, 10000L); + assertEquals(role, provider.authenticateAsync(new AuthenticationDataCommand(oidcToken)).get()); + + // Authenticate using the static token + assertEquals("superuser", provider.authenticateAsync(new AuthenticationDataCommand(staticToken)).get()); + + // Use authenticationState to authentication using OIDC + AuthenticationState state1 = service.getAuthenticationProvider("token").newAuthState(null, null, null); + assertNull(state1.authenticateAsync(AuthData.of(oidcToken.getBytes())).get()); + assertEquals(state1.getAuthRole(), role); + + // Use authenticationState to authentication using static token + AuthenticationState state2 = service.getAuthenticationProvider("token").newAuthState(null, null, null); + assertNull(state2.authenticateAsync(AuthData.of(staticToken.getBytes())).get()); + assertEquals(state1.getAuthRole(), role); + } + @Test void ensureRoleClaimForNonSubClaimReturnsRole() throws Exception { AuthenticationProviderOpenID provider = new AuthenticationProviderOpenID(); diff --git a/pulsar-broker-auth-sasl/pom.xml b/pulsar-broker-auth-sasl/pom.xml index 8cde4a2fc4552..87fcbf2f7db56 100644 --- a/pulsar-broker-auth-sasl/pom.xml +++ b/pulsar-broker-auth-sasl/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-broker-auth-sasl jar SASL authentication plugin for broker - - ${project.groupId} pulsar-broker 
${project.version} - org.apache.kerby kerby-config @@ -53,7 +47,6 @@ - org.apache.kerby kerb-simplekdc @@ -66,30 +59,25 @@ - ${project.groupId} pulsar-proxy ${project.version} test - ${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-client-auth-sasl ${project.version} test - - @@ -111,8 +99,6 @@ - - @@ -132,7 +118,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java b/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java index c979566e485ce..82f760e14b7d6 100644 --- a/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java +++ b/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java @@ -76,7 +76,7 @@ public String verifyAndExtract(String signedStr) throws AuthenticationException String originalSignature = signedStr.substring(index + SIGNATURE.length()); String rawValue = signedStr.substring(0, index); String currentSignature = computeSignature(rawValue); - if (!originalSignature.equals(currentSignature)) { + if (!MessageDigest.isEqual(originalSignature.getBytes(), currentSignature.getBytes())){ throw new AuthenticationException("Invalid signature"); } return rawValue; diff --git a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java index 261efe680f862..f0e45aa734afb 100644 --- a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java +++ b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java @@ -49,6 +49,7 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.auth.AuthenticationSasl; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.proxy.server.ProxyConfiguration; import org.apache.pulsar.proxy.server.ProxyService; import org.slf4j.Logger; @@ -193,15 +194,17 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setClusterName("test"); conf.setSuperUserRoles(ImmutableSet.of("client/" + localHostname + "@" + kdc.getRealm())); - - super.init(); - - lookupUrl = new URI(pulsar.getBrokerServiceUrl()); - // set admin auth, to verify admin web resources Map clientSaslConfig = new HashMap<>(); clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); clientSaslConfig.put("serverType", "broker"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationSasl.class.getName()); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(clientSaslConfig)); + + super.init(); + + lookupUrl = new URI(pulsar.getBrokerServiceUrl()); log.info("set client jaas section name: PulsarClient"); admin = PulsarAdmin.builder() .serviceHttpUrl(brokerUrl.toString()) diff --git a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java index 8a0d0392d1333..76c6f023d9a36 100644 --- 
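The SaslRoleTokenSigner change in this hunk replaces String.equals with MessageDigest.isEqual so that verifying a role token's signature does not short-circuit on the first differing byte and leak timing information. A stand-alone sketch of that check; the digest used for computeSignature here is an assumption, not the signer's real algorithm:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    class SignatureCheckSketch {
        // Compare an expected and a presented signature without bailing out at the first
        // mismatching byte, so callers cannot measure how many leading bytes were correct.
        static boolean signaturesMatch(String expected, String presented) {
            return MessageDigest.isEqual(
                    expected.getBytes(StandardCharsets.UTF_8),
                    presented.getBytes(StandardCharsets.UTF_8));
        }

        static String sha256Hex(String rawValue) throws NoSuchAlgorithmException {
            // Stand-in for computeSignature(rawValue); the real signer's algorithm is not shown here.
            byte[] digest = MessageDigest.getInstance("SHA-256")
                    .digest(rawValue.getBytes(StandardCharsets.UTF_8));
            StringBuilder sb = new StringBuilder();
            for (byte b : digest) {
                sb.append(String.format("%02x", b));
            }
            return sb.toString();
        }
    }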
a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java +++ b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java @@ -53,6 +53,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationSasl; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.sasl.SaslConstants; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; @@ -180,7 +181,12 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setClusterName("test"); conf.setSuperUserRoles(ImmutableSet.of("client" + "@" + kdc.getRealm())); - + Map clientSaslConfig = new HashMap<>(); + clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); + clientSaslConfig.put("serverType", "broker"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationSasl.class.getName()); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(clientSaslConfig)); super.init(); lookupUrl = new URI(pulsar.getWebServiceAddress()); @@ -191,9 +197,6 @@ protected void setup() throws Exception { .authentication(authSasl)); // set admin auth, to verify admin web resources - Map clientSaslConfig = new HashMap<>(); - clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); - clientSaslConfig.put("serverType", "broker"); log.info("set client jaas section name: PulsarClient"); admin = PulsarAdmin.builder() .serviceHttpUrl(brokerUrl.toString()) diff --git a/pulsar-broker-common/pom.xml b/pulsar-broker-common/pom.xml index 3e024d14b92a0..44b25ab0cf3e4 100644 --- a/pulsar-broker-common/pom.xml +++ b/pulsar-broker-common/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-broker-common Common classes used in multiple broker modules - ${project.groupId} pulsar-metadata ${project.version} - com.google.guava guava - io.prometheus simpleclient_jetty - javax.servlet javax.servlet-api - javax.ws.rs javax.ws.rs-api - io.jsonwebtoken jjwt-impl - io.jsonwebtoken jjwt-jackson - org.bouncycastle @@ -76,14 +65,12 @@ ${bouncycastle.bc-fips.version} test - org.awaitility awaitility test - @@ -103,7 +90,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -117,7 +103,6 @@ - maven-resources-plugin diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java index e9e350800b44e..983822f22941b 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java @@ -121,8 +121,6 @@ public synchronized void setConf(Configuration conf) { store.registerListener(this::handleUpdates); racksWithHost = bookieMappingCache.get(BOOKIE_INFO_ROOT_PATH).get() .orElseGet(BookiesRackConfiguration::new); - updateRacksWithHost(racksWithHost); - watchAvailableBookies(); for (Map bookieMapping : racksWithHost.values()) { for (String address : bookieMapping.keySet()) { bookieAddressListLastTime.add(BookieId.parse(address)); @@ -132,6 +130,8 @@ public synchronized void setConf(Configuration conf) { bookieAddressListLastTime); } } + updateRacksWithHost(racksWithHost); + 
watchAvailableBookies(); } catch (InterruptedException | ExecutionException | MetadataException e) { throw new RuntimeException(METADATA_STORE_INSTANCE + " failed to init BookieId list"); } @@ -245,6 +245,7 @@ private void handleUpdates(Notification n) { bookieMappingCache.get(BOOKIE_INFO_ROOT_PATH) .thenAccept(optVal -> { + Set bookieIdSet = new HashSet<>(); synchronized (this) { LOG.info("Bookie rack info updated to {}. Notifying rackaware policy.", optVal); this.updateRacksWithHost(optVal.orElseGet(BookiesRackConfiguration::new)); @@ -259,12 +260,12 @@ private void handleUpdates(Notification n) { LOG.debug("Bookies with rack update from {} to {}", bookieAddressListLastTime, bookieAddressList); } - Set bookieIdSet = new HashSet<>(bookieAddressList); + bookieIdSet.addAll(bookieAddressList); bookieIdSet.addAll(bookieAddressListLastTime); bookieAddressListLastTime = bookieAddressList; - if (rackawarePolicy != null) { - rackawarePolicy.onBookieRackChange(new ArrayList<>(bookieIdSet)); - } + } + if (rackawarePolicy != null) { + rackawarePolicy.onBookieRackChange(new ArrayList<>(bookieIdSet)); } }); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java index 2594798485a20..c0645f87936e5 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java @@ -19,6 +19,7 @@ package org.apache.pulsar.bookie.rackawareness; import static org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping.METADATA_STORE_INSTANCE; +import com.google.common.annotations.VisibleForTesting; import io.netty.util.HashedWheelTimer; import java.util.Arrays; import java.util.Collections; @@ -27,7 +28,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException; import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; @@ -61,6 +61,7 @@ public class IsolatedBookieEnsemblePlacementPolicy extends RackawareEnsemblePlac private static final String PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP = "*"; + private volatile BookiesRackConfiguration cachedRackConfiguration = null; public IsolatedBookieEnsemblePlacementPolicy() { super(); @@ -86,7 +87,12 @@ public RackawareEnsemblePlacementPolicyImpl initialize(ClientConfiguration conf, } // Only add the bookieMappingCache if we have defined an isolation group bookieMappingCache = store.getMetadataCache(BookiesRackConfiguration.class); - bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH).join(); + bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH).thenAccept(opt -> opt.ifPresent( + bookiesRackConfiguration -> cachedRackConfiguration = bookiesRackConfiguration)) + .exceptionally(e -> { + log.warn("Failed to load bookies rack configuration while initialize the PlacementPolicy."); + return null; + }); } if (conf.getProperty(SECONDARY_ISOLATION_BOOKIE_GROUPS) != null) { String secondaryIsolationGroupsString = ConfigurationStringUtil @@ -179,25 +185,26 @@ private static Pair, Set> getIsolationGroup( return pair; } - private Set getExcludedBookiesWithIsolationGroups(int ensembleSize, + 
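The IsolatedBookieEnsemblePlacementPolicy change above stops calling join() on the metadata-store future at placement time. Instead it keeps the last loaded BookiesRackConfiguration in a volatile field, kicks off an asynchronous refresh on every lookup, and answers from the cached copy (or excludes nothing while the first load is still in flight). A distilled sketch of that pattern with hypothetical names:

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Supplier;

    class CachedConfigSketch<T> {
        private final Supplier<CompletableFuture<Optional<T>>> loader;
        private volatile T cached;          // last successfully loaded value, possibly stale

        CachedConfigSketch(Supplier<CompletableFuture<Optional<T>>> loader) {
            this.loader = loader;
        }

        // Never blocks the caller: start a refresh, then answer from whatever is cached right now.
        T getLatestOrCached() {
            loader.get()
                    .thenAccept(opt -> opt.ifPresent(value -> cached = value))
                    .exceptionally(e -> null);   // a failed refresh keeps the previous value
            return cached;                       // may be null before the first load completes
        }
    }

Serving a possibly stale configuration trades a little precision for never blocking ensemble placement on metadata-store latency.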
@VisibleForTesting + Set getExcludedBookiesWithIsolationGroups(int ensembleSize, Pair, Set> isolationGroups) { Set excludedBookies = new HashSet<>(); - if (isolationGroups != null && isolationGroups.getLeft().contains(PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP)) { + if (isolationGroups != null && isolationGroups.getLeft().contains(PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP)) { return excludedBookies; } try { if (bookieMappingCache != null) { - CompletableFuture> future = - bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH); + bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH) + .thenAccept(opt -> cachedRackConfiguration = opt.orElse(null)).exceptionally(e -> { + log.warn("Failed to update the newest bookies rack config."); + return null; + }); - Optional optRes = (future.isDone() && !future.isCompletedExceptionally()) - ? future.join() : Optional.empty(); - - if (optRes.isEmpty()) { + BookiesRackConfiguration allGroupsBookieMapping = cachedRackConfiguration; + if (allGroupsBookieMapping == null) { + log.debug("The bookies rack config is not available at now."); return excludedBookies; } - - BookiesRackConfiguration allGroupsBookieMapping = optRes.get(); Set allBookies = allGroupsBookieMapping.keySet(); int totalAvailableBookiesInPrimaryGroup = 0; Set primaryIsolationGroup = Collections.emptySet(); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java index b31c86560f848..077228191add1 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java @@ -372,8 +372,9 @@ The delayed message index time step(in seconds) in per bucket snapshot segment, @FieldContext(category = CATEGORY_SERVER, doc = """ The max number of delayed message index bucket, \ - after reaching the max buckets limitation, the adjacent buckets will be merged.""") - private int delayedDeliveryMaxNumBuckets = 50; + after reaching the max buckets limitation, the adjacent buckets will be merged.\ + (disable with value -1)""") + private int delayedDeliveryMaxNumBuckets = -1; @FieldContext(category = CATEGORY_SERVER, doc = "Size of the lookahead window to use " + "when detecting if all the messages in the topic have a fixed delay. " @@ -2743,6 +2744,12 @@ The delayed message index time step(in seconds) in per bucket snapshot segment, ) private long brokerServiceCompactionPhaseOneLoopTimeInSeconds = 30; + @FieldContext( + category = CATEGORY_SERVER, + doc = "Whether retain null-key message during topic compaction." 
+ ) + private boolean topicCompactionRetainNullKey = true; + @FieldContext( category = CATEGORY_SERVER, doc = "Interval between checks to see if cluster is migrated and marks topic migrated " diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java index 69ef526012daa..9a7324a6d077a 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java @@ -71,4 +71,8 @@ public boolean hasSubscription() { public String getSubscription() { return subscription; } + + public AuthenticationDataSource getAuthData() { + return authData; + } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProvider.java index 109259537a494..7862a35b5e871 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProvider.java @@ -30,6 +30,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.util.FutureUtil; @@ -143,6 +144,10 @@ default CompletableFuture authenticateHttpRequestAsync(HttpServletReque } } + default void incrementFailureMetric(Enum errorCode) { + AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), errorCode); + } + /** * Set response, according to passed in request. * and return whether we should do following chain.doFilter or not. 
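Throughout this diff the authentication providers stop passing free-form exception messages to the failure counter and report a small per-provider ErrorCode enum instead, which keeps the cardinality of the Prometheus reason label bounded. With the default incrementFailureMetric(Enum) added to AuthenticationProvider above, a provider only needs to pick the enum value before it throws. A hedged sketch of a custom provider using the pattern; the provider itself and its header-based method are hypothetical:

    import java.io.IOException;
    import javax.naming.AuthenticationException;
    import org.apache.pulsar.broker.ServiceConfiguration;
    import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
    import org.apache.pulsar.broker.authentication.AuthenticationProvider;
    import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics;

    public class AuthenticationProviderHeaderSketch implements AuthenticationProvider {

        private enum ErrorCode {
            UNKNOWN,
            MISSING_HEADER,
        }

        @Override
        public void initialize(ServiceConfiguration config) throws IOException {
            // no configuration needed for the sketch
        }

        @Override
        public String getAuthMethodName() {
            return "header";
        }

        @Override
        public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
            ErrorCode errorCode = ErrorCode.UNKNOWN;
            try {
                if (!authData.hasDataFromHttp()) {
                    errorCode = ErrorCode.MISSING_HEADER;
                    throw new AuthenticationException("Expected an HTTP header with the principal");
                }
                String role = authData.getHttpHeader("X-Principal");
                if (role == null || role.isBlank()) {
                    errorCode = ErrorCode.MISSING_HEADER;
                    throw new AuthenticationException("X-Principal header is missing or empty");
                }
                AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
                return role;
            } catch (AuthenticationException e) {
                // Default method from the interface; the metric label becomes errorCode.name().
                incrementFailureMetric(errorCode);
                throw e;
            }
        }

        @Override
        public void close() throws IOException {
            // nothing to release
        }
    }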
diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java index 3c4759fec7da9..ca5150c9bdb60 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java @@ -46,6 +46,14 @@ public class AuthenticationProviderBasic implements AuthenticationProvider { private static final String CONF_PULSAR_PROPERTY_KEY = "basicAuthConf"; private Map users; + private enum ErrorCode { + UNKNOWN, + EMPTY_AUTH_DATA, + INVALID_HEADER, + INVALID_AUTH_DATA, + INVALID_TOKEN, + } + @Override public void close() throws IOException { // noop @@ -104,9 +112,10 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat String userId = authParams.getUserId(); String password = authParams.getPassword(); String msg = "Unknown user or invalid password"; - + ErrorCode errorCode = ErrorCode.UNKNOWN; try { if (users.get(userId) == null) { + errorCode = ErrorCode.INVALID_AUTH_DATA; throw new AuthenticationException(msg); } @@ -117,15 +126,16 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat List splitEncryptedPassword = Arrays.asList(encryptedPassword.split("\\$")); if (splitEncryptedPassword.size() != 4 || !encryptedPassword .equals(Md5Crypt.apr1Crypt(password.getBytes(), splitEncryptedPassword.get(2)))) { + errorCode = ErrorCode.INVALID_TOKEN; throw new AuthenticationException(msg); } // For crypt algorithm } else if (!encryptedPassword.equals(Crypt.crypt(password.getBytes(), encryptedPassword.substring(0, 2)))) { + errorCode = ErrorCode.INVALID_TOKEN; throw new AuthenticationException(msg); } } catch (AuthenticationException exception) { - AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), - exception.getMessage()); + incrementFailureMetric(errorCode); throw exception; } AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); @@ -144,24 +154,29 @@ public AuthParams(AuthenticationDataSource authData) throws AuthenticationExcept String rawAuthToken = authData.getHttpHeader(HTTP_HEADER_NAME); // parsing and validation if (StringUtils.isBlank(rawAuthToken) || !rawAuthToken.toUpperCase().startsWith("BASIC ")) { + incrementFailureMetric(ErrorCode.INVALID_HEADER); throw new AuthenticationException("Authentication token has to be started with \"Basic \""); } String[] splitRawAuthToken = rawAuthToken.split(" "); if (splitRawAuthToken.length != 2) { + incrementFailureMetric(ErrorCode.INVALID_HEADER); throw new AuthenticationException("Base64 encoded token is not found"); } try { authParams = new String(Base64.getDecoder().decode(splitRawAuthToken[1])); } catch (Exception e) { + incrementFailureMetric(ErrorCode.INVALID_HEADER); throw new AuthenticationException("Base64 decoding is failure: " + e.getMessage()); } } else { + incrementFailureMetric(ErrorCode.EMPTY_AUTH_DATA); throw new AuthenticationException("Authentication data source does not have data"); } String[] parsedAuthParams = authParams.split(":"); if (parsedAuthParams.length != 2) { + incrementFailureMetric(ErrorCode.INVALID_AUTH_DATA); throw new AuthenticationException("Base64 decoded params are invalid"); } diff --git 
a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java index f8a96aa624e12..663a6253f4460 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java @@ -22,6 +22,7 @@ import java.net.SocketAddress; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import javax.naming.AuthenticationException; import javax.net.ssl.SSLSession; import javax.servlet.http.HttpServletRequest; @@ -43,9 +44,15 @@ private interface AuthProcessor { } + private enum ErrorCode { + UNKNOWN, + AUTH_REQUIRED, + } + static T applyAuthProcessor(List processors, AuthProcessor authFunc) throws AuthenticationException { AuthenticationException authenticationException = null; + String errorCode = ErrorCode.UNKNOWN.name(); for (W ap : processors) { try { return authFunc.apply(ap); @@ -55,19 +62,19 @@ static T applyAuthProcessor(List processors, AuthProcessor authF } // Store the exception so we can throw it later instead of a generic one authenticationException = ae; + errorCode = ap.getClass().getSimpleName() + "-INVALID-AUTH"; } } if (null == authenticationException) { AuthenticationMetrics.authenticateFailure( AuthenticationProviderList.class.getSimpleName(), - "authentication-provider-list", "Authentication required"); + "authentication-provider-list", ErrorCode.AUTH_REQUIRED); throw new AuthenticationException("Authentication required"); } else { - AuthenticationMetrics.authenticateFailure(AuthenticationProviderList.class.getSimpleName(), - "authentication-provider-list", - authenticationException.getMessage() != null - ? authenticationException.getMessage() : "Authentication required"); + AuthenticationMetrics.authenticateFailure( + AuthenticationProviderList.class.getSimpleName(), + "authentication-provider-list", errorCode); throw authenticationException; } @@ -76,9 +83,12 @@ static T applyAuthProcessor(List processors, AuthProcessor authF private static class AuthenticationListState implements AuthenticationState { private final List states; - private AuthenticationState authState; + private volatile AuthenticationState authState; AuthenticationListState(List states) { + if (states == null || states.isEmpty()) { + throw new IllegalArgumentException("Authentication state requires at least one state"); + } this.states = states; this.authState = states.get(0); } @@ -96,6 +106,61 @@ public String getAuthRole() throws AuthenticationException { return getAuthState().getAuthRole(); } + @Override + public CompletableFuture authenticateAsync(AuthData authData) { + // First, attempt to authenticate with the current auth state + CompletableFuture authChallengeFuture = new CompletableFuture<>(); + authState + .authenticateAsync(authData) + .whenComplete((authChallenge, ex) -> { + if (ex == null) { + // Current authState is still correct. Just need to return the authChallenge. 
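The new authenticateAsync implementations in AuthenticationProviderList walk the configured providers, settle on the first one that accepts the credentials, and keep the most recent failure for the exception that is finally surfaced. Stripped of the metrics, ordering details, and the current-state skip, the control flow reduces to a generic helper along these lines (illustrative names only):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Function;

    class FirstSuccessSketch {
        // Try each attempt in turn; complete with the first success, or with the last failure
        // once every attempt has been exhausted. Order and state handling are simplified here.
        static <A, T> CompletableFuture<T> firstSuccessful(List<Function<A, CompletableFuture<T>>> attempts,
                                                           A input) {
            return tryFrom(attempts, input, 0, new IllegalStateException("no attempts configured"));
        }

        private static <A, T> CompletableFuture<T> tryFrom(List<Function<A, CompletableFuture<T>>> attempts,
                                                           A input, int index, Throwable lastError) {
            if (index >= attempts.size()) {
                return CompletableFuture.failedFuture(lastError);
            }
            return attempts.get(index).apply(input)
                    .thenApply(CompletableFuture::completedFuture)                 // success: wrap and pass through
                    .exceptionally(ex -> tryFrom(attempts, input, index + 1, ex))  // failure: move to the next one
                    .thenCompose(f -> f);                                          // flatten the nested future
        }
    }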
+ authChallengeFuture.complete(authChallenge); + } else { + if (log.isDebugEnabled()) { + log.debug("Authentication failed for auth provider " + authState.getClass() + ": ", ex); + } + authenticateRemainingAuthStates(authChallengeFuture, authData, ex, states.size() - 1); + } + }); + return authChallengeFuture; + } + + private void authenticateRemainingAuthStates(CompletableFuture authChallengeFuture, + AuthData clientAuthData, + Throwable previousException, + int index) { + if (index < 0) { + if (previousException == null) { + previousException = new AuthenticationException("Authentication required"); + } + AuthenticationMetrics.authenticateFailure(AuthenticationProviderList.class.getSimpleName(), + "authentication-provider-list", ErrorCode.AUTH_REQUIRED); + authChallengeFuture.completeExceptionally(previousException); + return; + } + AuthenticationState state = states.get(index); + if (state == authState) { + // Skip the current auth state + authenticateRemainingAuthStates(authChallengeFuture, clientAuthData, null, index - 1); + } else { + state.authenticateAsync(clientAuthData) + .whenComplete((authChallenge, ex) -> { + if (ex == null) { + // Found the correct auth state + authState = state; + authChallengeFuture.complete(authChallenge); + } else { + if (log.isDebugEnabled()) { + log.debug("Authentication failed for auth provider " + + authState.getClass() + ": ", ex); + } + authenticateRemainingAuthStates(authChallengeFuture, clientAuthData, ex, index - 1); + } + }); + } + } + @Override public AuthData authenticate(AuthData authData) throws AuthenticationException { return applyAuthProcessor( @@ -160,6 +225,40 @@ public String getAuthMethodName() { return providers.get(0).getAuthMethodName(); } + @Override + public CompletableFuture authenticateAsync(AuthenticationDataSource authData) { + CompletableFuture roleFuture = new CompletableFuture<>(); + authenticateRemainingAuthProviders(roleFuture, authData, null, providers.size() - 1); + return roleFuture; + } + + private void authenticateRemainingAuthProviders(CompletableFuture roleFuture, + AuthenticationDataSource authData, + Throwable previousException, + int index) { + if (index < 0) { + if (previousException == null) { + previousException = new AuthenticationException("Authentication required"); + } + AuthenticationMetrics.authenticateFailure(AuthenticationProviderList.class.getSimpleName(), + "authentication-provider-list", ErrorCode.AUTH_REQUIRED); + roleFuture.completeExceptionally(previousException); + return; + } + AuthenticationProvider provider = providers.get(index); + provider.authenticateAsync(authData) + .whenComplete((role, ex) -> { + if (ex == null) { + roleFuture.complete(role); + } else { + if (log.isDebugEnabled()) { + log.debug("Authentication failed for auth provider " + provider.getClass() + ": ", ex); + } + authenticateRemainingAuthProviders(roleFuture, authData, ex, index - 1); + } + }); + } + @Override public String authenticate(AuthenticationDataSource authData) throws AuthenticationException { return applyAuthProcessor( diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTls.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTls.java index 47e5316bce9ce..a4c44121b4b96 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTls.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTls.java @@ 
-27,6 +27,12 @@ public class AuthenticationProviderTls implements AuthenticationProvider { + private enum ErrorCode { + UNKNOWN, + INVALID_CERTS, + INVALID_CN, // invalid common name + } + @Override public void close() throws IOException { // noop @@ -45,6 +51,7 @@ public String getAuthMethodName() { @Override public String authenticate(AuthenticationDataSource authData) throws AuthenticationException { String commonName = null; + ErrorCode errorCode = ErrorCode.UNKNOWN; try { if (authData.hasDataFromTls()) { /** @@ -72,6 +79,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat // CN=Steve Kille,O=Isode Limited,C=GB Certificate[] certs = authData.getTlsCertificates(); if (null == certs) { + errorCode = ErrorCode.INVALID_CERTS; throw new AuthenticationException("Failed to get TLS certificates from client"); } String distinguishedName = ((X509Certificate) certs[0]).getSubjectX500Principal().getName(); @@ -85,12 +93,12 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat } if (commonName == null) { + errorCode = ErrorCode.INVALID_CN; throw new AuthenticationException("Client unable to authenticate with TLS certificate"); } AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); } catch (AuthenticationException exception) { - AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), - exception.getMessage()); + incrementFailureMetric(errorCode); throw exception; } return commonName; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java index 67e1f39a38924..f8992b21ff49f 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java @@ -106,6 +106,12 @@ public class AuthenticationProviderToken implements AuthenticationProvider { private String confTokenAudienceSettingName; private String confTokenAllowedClockSkewSecondsSettingName; + public enum ErrorCode { + INVALID_AUTH_DATA, + INVALID_TOKEN, + INVALID_AUDIENCES, + } + @Override public void close() throws IOException { // noop @@ -158,19 +164,18 @@ public String getAuthMethodName() { @Override public String authenticate(AuthenticationDataSource authData) throws AuthenticationException { + String token; try { // Get Token - String token; token = getToken(authData); - // Parse Token by validating - String role = getPrincipal(authenticateToken(token)); - AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); - return role; } catch (AuthenticationException exception) { - AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), - exception.getMessage()); + incrementFailureMetric(ErrorCode.INVALID_AUTH_DATA); throw exception; } + // Parse Token by validating + String role = getPrincipal(authenticateToken(token)); + AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); + return role; } @Override @@ -241,16 +246,19 @@ private Jwt authenticateToken(final String token) throws Authenticati List audiences = (List) object; // audience not contains this broker, throw exception. 
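AuthenticationProviderTls above takes the subject DN from the first client certificate and authenticates its CN as the role; the parsing of the DN itself falls outside the lines shown in that hunk. One conventional way to pull the CN out of an RFC 2253 DN, offered only as a hedged sketch rather than the provider's actual code:

    import javax.naming.InvalidNameException;
    import javax.naming.ldap.LdapName;
    import javax.naming.ldap.Rdn;

    class CommonNameSketch {
        // Returns the CN attribute of a distinguished name such as
        // "CN=Steve Kille,O=Isode Limited,C=GB", or null when no CN is present.
        static String commonNameOf(String distinguishedName) throws InvalidNameException {
            for (Rdn rdn : new LdapName(distinguishedName).getRdns()) {
                if ("CN".equalsIgnoreCase(rdn.getType())) {
                    return rdn.getValue().toString();
                }
            }
            return null;
        }
    }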
if (audiences.stream().noneMatch(audienceInToken -> audienceInToken.equals(audience))) { - throw new AuthenticationException("Audiences in token: [" + String.join(", ", audiences) - + "] not contains this broker: " + audience); + incrementFailureMetric(ErrorCode.INVALID_AUDIENCES); + throw new AuthenticationException("Audiences in token: [" + + String.join(", ", audiences) + "] not contains this broker: " + audience); } } else if (object instanceof String) { if (!object.equals(audience)) { - throw new AuthenticationException("Audiences in token: [" + object - + "] not contains this broker: " + audience); + incrementFailureMetric(ErrorCode.INVALID_AUDIENCES); + throw new AuthenticationException( + "Audiences in token: [" + object + "] not contains this broker: " + audience); } } else { // should not reach here. + incrementFailureMetric(ErrorCode.INVALID_AUDIENCES); throw new AuthenticationException("Audiences in token is not in expected format: " + object); } } @@ -264,6 +272,7 @@ private Jwt authenticateToken(final String token) throws Authenticati if (e instanceof ExpiredJwtException) { expiredTokenMetrics.inc(); } + incrementFailureMetric(ErrorCode.INVALID_TOKEN); throw new AuthenticationException("Failed to authentication token: " + e.getMessage()); } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/metrics/AuthenticationMetrics.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/metrics/AuthenticationMetrics.java index 0a095235f3cb3..5faaccbe15716 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/metrics/AuthenticationMetrics.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/metrics/AuthenticationMetrics.java @@ -43,11 +43,27 @@ public static void authenticateSuccess(String providerName, String authMethod) { /** * Log authenticate failure event to the authentication metrics. + * + * This method is deprecated due to the label "reason" is a potential infinite value. + * @deprecated See {@link #authenticateFailure(String, String, Enum)} ()} + * * @param providerName The short class name of the provider * @param authMethod Authentication method name. * @param reason Failure reason. */ + @Deprecated public static void authenticateFailure(String providerName, String authMethod, String reason) { authFailuresMetrics.labels(providerName, authMethod, reason).inc(); } + + /** + * Log authenticate failure event to the authentication metrics. + * @param providerName The short class name of the provider + * @param authMethod Authentication method name. + * @param errorCode Error code. 
+ */ + public static void authenticateFailure(String providerName, String authMethod, Enum errorCode) { + authFailuresMetrics.labels(providerName, authMethod, errorCode.name()).inc(); + } + } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java index fa613245cfa27..fdab233a51098 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; @@ -89,15 +90,18 @@ public void initialize(ServiceConfiguration conf, PulsarResources pulsarResource @Override public CompletableFuture isSuperUser(String role, AuthenticationDataSource authenticationData, ServiceConfiguration serviceConfiguration) { - Set roles = getRoles(authenticationData); - if (roles.isEmpty()) { - return CompletableFuture.completedFuture(false); - } + // if superUser role contains in config, return true. Set superUserRoles = serviceConfiguration.getSuperUserRoles(); if (superUserRoles.isEmpty()) { return CompletableFuture.completedFuture(false); } - + if (role != null && superUserRoles.contains(role)) { + return CompletableFuture.completedFuture(true); + } + Set roles = getRoles(role, authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } return CompletableFuture.completedFuture(roles.stream().anyMatch(superUserRoles::contains)); } @@ -109,7 +113,7 @@ public CompletableFuture validateTenantAdminAccess(String tenantName, S if (isSuperUser) { return CompletableFuture.completedFuture(true); } - Set roles = getRoles(authData); + Set roles = getRoles(role, authData); if (roles.isEmpty()) { return CompletableFuture.completedFuture(false); } @@ -140,7 +144,12 @@ public CompletableFuture validateTenantAdminAccess(String tenantName, S }); } - private Set getRoles(AuthenticationDataSource authData) { + private Set getRoles(String role, AuthenticationDataSource authData) { + if (authData == null || (authData instanceof AuthenticationDataSubscription + && ((AuthenticationDataSubscription) authData).getAuthData() == null)) { + return Collections.singleton(role); + } + String token = null; if (authData.hasDataFromCommand()) { @@ -174,7 +183,14 @@ private Set getRoles(AuthenticationDataSource authData) { Jwt jwt = parser.parseClaimsJwt(unsignedToken); try { - return new HashSet<>(Collections.singletonList(jwt.getBody().get(roleClaim, String.class))); + final String jwtRole = jwt.getBody().get(roleClaim, String.class); + if (jwtRole == null) { + if (log.isDebugEnabled()) { + log.debug("Do not have corresponding claim in jwt token. 
claim={}", roleClaim); + } + return Collections.emptySet(); + } + return new HashSet<>(Collections.singletonList(jwtRole)); } catch (RequiredTypeException requiredTypeException) { try { List list = jwt.getBody().get(roleClaim, List.class); @@ -189,15 +205,21 @@ private Set getRoles(AuthenticationDataSource authData) { return Collections.emptySet(); } - public CompletableFuture authorize(AuthenticationDataSource authenticationData, Function> authorizeFunc) { - Set roles = getRoles(authenticationData); - if (roles.isEmpty()) { - return CompletableFuture.completedFuture(false); - } - List> futures = new ArrayList<>(roles.size()); - roles.forEach(r -> futures.add(authorizeFunc.apply(r))); - return FutureUtil.waitForAny(futures, ret -> (boolean) ret).thenApply(v -> v.isPresent()); + public CompletableFuture authorize(String role, AuthenticationDataSource authenticationData, + Function> authorizeFunc) { + return isSuperUser(role, authenticationData, conf) + .thenCompose(superUser -> { + if (superUser) { + return CompletableFuture.completedFuture(true); + } + Set roles = getRoles(role, authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } + List> futures = new ArrayList<>(roles.size()); + roles.forEach(r -> futures.add(authorizeFunc.apply(r))); + return FutureUtil.waitForAny(futures, ret -> (boolean) ret).thenApply(v -> v.isPresent()); + }); } /** @@ -209,7 +231,7 @@ public CompletableFuture authorize(AuthenticationDataSource authenticat @Override public CompletableFuture canProduceAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); } /** @@ -224,7 +246,7 @@ public CompletableFuture canProduceAsync(TopicName topicName, String ro public CompletableFuture canConsumeAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData, String subscription) { - return authorize(authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, + return authorize(role, authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, subscription)); } @@ -241,25 +263,27 @@ public CompletableFuture canConsumeAsync(TopicName topicName, String ro @Override public CompletableFuture canLookupAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData)); } @Override public CompletableFuture allowFunctionOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowFunctionOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, + r -> super.allowFunctionOpsAsync(namespaceName, r, authenticationData)); } @Override public CompletableFuture allowSourceOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowSourceOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, + r -> super.allowSourceOpsAsync(namespaceName, r, authenticationData)); } @Override public CompletableFuture 
allowSinkOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowSinkOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.allowSinkOpsAsync(namespaceName, r, authenticationData)); } @Override @@ -267,7 +291,7 @@ public CompletableFuture allowTenantOperationAsync(String tenantName, String role, TenantOperation operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTenantOperationAsync(tenantName, r, operation, authData)); + return authorize(role, authData, r -> super.allowTenantOperationAsync(tenantName, r, operation, authData)); } @Override @@ -275,7 +299,8 @@ public CompletableFuture allowNamespaceOperationAsync(NamespaceName nam String role, NamespaceOperation operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowNamespaceOperationAsync(namespaceName, r, operation, authData)); + return authorize(role, authData, + r -> super.allowNamespaceOperationAsync(namespaceName, r, operation, authData)); } @Override @@ -284,8 +309,8 @@ public CompletableFuture allowNamespacePolicyOperationAsync(NamespaceNa PolicyOperation operation, String role, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, r, - authData)); + return authorize(role, authData, + r -> super.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, r, authData)); } @Override @@ -293,7 +318,7 @@ public CompletableFuture allowTopicOperationAsync(TopicName topicName, String role, TopicOperation operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTopicOperationAsync(topicName, r, operation, authData)); + return authorize(role, authData, r -> super.allowTopicOperationAsync(topicName, r, operation, authData)); } @Override @@ -302,7 +327,7 @@ public CompletableFuture allowTopicPolicyOperationAsync(TopicName topic PolicyName policyName, PolicyOperation policyOperation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTopicPolicyOperationAsync(topicName, r, policyName, policyOperation, - authData)); + return authorize(role, authData, + r -> super.allowTopicPolicyOperationAsync(topicName, r, policyName, policyOperation, authData)); } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java index 3f6d38194713e..162c44bec38e0 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java @@ -302,6 +302,9 @@ private CompletableFuture updateSubscriptionPermissionAsync(NamespaceName policies.auth_policies.getSubscriptionAuthentication().get(subscriptionName); if (subscriptionAuth != null) { subscriptionAuth.removeAll(roles); + if (subscriptionAuth.isEmpty()) { + policies.auth_policies.getSubscriptionAuthentication().remove(subscriptionName); + } } else { log.info("[{}] Couldn't find role {} while revoking for sub = {}", namespace, roles, subscriptionName); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java 
b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java index 48f8259656729..e35c208c208db 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java @@ -49,16 +49,18 @@ public class NamespaceResources extends BaseResources { private final IsolationPolicyResources isolationPolicies; private final PartitionedTopicResources partitionedTopicResources; private final MetadataStore configurationStore; + private final MetadataStore localStore; public static final String POLICIES_READONLY_FLAG_PATH = "/admin/flags/policies-readonly"; private static final String NAMESPACE_BASE_PATH = "/namespace"; private static final String BUNDLE_DATA_BASE_PATH = "/loadbalance/bundle-data"; - public NamespaceResources(MetadataStore configurationStore, int operationTimeoutSec) { + public NamespaceResources(MetadataStore localStore, MetadataStore configurationStore, int operationTimeoutSec) { super(configurationStore, Policies.class, operationTimeoutSec); this.configurationStore = configurationStore; isolationPolicies = new IsolationPolicyResources(configurationStore, operationTimeoutSec); partitionedTopicResources = new PartitionedTopicResources(configurationStore, operationTimeoutSec); + this.localStore = localStore; } public CompletableFuture> listNamespacesAsync(String tenant) { @@ -117,6 +119,13 @@ public Optional getPolicies(NamespaceName ns) throws MetadataStoreExce return get(joinPath(BASE_POLICIES_PATH, ns.toString())); } + /** + * Get the namespace policy from the metadata cache. This method will not trigger the load of metadata cache. + * + * @deprecated Since this method may introduce inconsistent namespace policies. 
we should use + * #{@link NamespaceResources#getPoliciesAsync} + */ + @Deprecated public Optional getPoliciesIfCached(NamespaceName ns) { return getCache().getIfCached(joinPath(BASE_POLICIES_PATH, ns.toString())); } @@ -374,13 +383,13 @@ public CompletableFuture runWithMarkDeleteAsync(TopicName topic, // clear resource of `/loadbalance/bundle-data/{tenant}/{namespace}/` in metadata-store public CompletableFuture deleteBundleDataAsync(NamespaceName ns) { final String namespaceBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, ns.toString()); - return getStore().deleteRecursive(namespaceBundlePath); + return this.localStore.deleteRecursive(namespaceBundlePath); } // clear resource of `/loadbalance/bundle-data/{tenant}/` in metadata-store public CompletableFuture deleteBundleDataTenantAsync(String tenant) { final String tenantBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, tenant); - return getStore().deleteRecursive(tenantBundlePath); + return this.localStore.deleteRecursive(tenantBundlePath); } } \ No newline at end of file diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java index dfcd0a4194ff5..a3c5633a6dbe8 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java @@ -60,7 +60,8 @@ public PulsarResources(MetadataStore localMetadataStore, MetadataStore configura if (configurationMetadataStore != null) { tenantResources = new TenantResources(configurationMetadataStore, operationTimeoutSec); clusterResources = new ClusterResources(configurationMetadataStore, operationTimeoutSec); - namespaceResources = new NamespaceResources(configurationMetadataStore, operationTimeoutSec); + namespaceResources = new NamespaceResources(localMetadataStore, configurationMetadataStore + , operationTimeoutSec); resourcegroupResources = new ResourceGroupResources(configurationMetadataStore, operationTimeoutSec); } else { tenantResources = null; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java index 840ced0a1c1c4..0963f25c3d31f 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java @@ -50,6 +50,9 @@ public TopicResources(MetadataStore store) { store.registerListener(this::handleNotification); } + /*** + * List persistent topics names under a namespace, the topic name contains the partition suffix. 
+ */ public CompletableFuture> listPersistentTopicsAsync(NamespaceName ns) { String path = MANAGED_LEDGER_PATH + "/" + ns + "/persistent"; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/AuthenticationFilter.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/AuthenticationFilter.java index 6f13185ca7540..0670412e105a0 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/AuthenticationFilter.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/AuthenticationFilter.java @@ -52,19 +52,29 @@ public AuthenticationFilter(AuthenticationService authenticationService) { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { + boolean allowed = false; + Exception authenticationException = null; try { - boolean doFilter = authenticationService + allowed = authenticationService .authenticateHttpRequest((HttpServletRequest) request, (HttpServletResponse) response); - if (doFilter) { - chain.doFilter(request, response); - } } catch (Exception e) { + authenticationException = e; + } + + if (allowed) { + chain.doFilter(request, response); + return; + } + + if (authenticationException != null) { HttpServletResponse httpResponse = (HttpServletResponse) response; httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Authentication required"); - if (e instanceof AuthenticationException) { - LOG.warn("[{}] Failed to authenticate HTTP request: {}", request.getRemoteAddr(), e.getMessage()); + if (authenticationException instanceof AuthenticationException) { + LOG.warn("[{}] Failed to authenticate HTTP request: {}", request.getRemoteAddr(), + authenticationException.getMessage()); } else { - LOG.error("[{}] Error performing authentication for HTTP", request.getRemoteAddr(), e); + LOG.error("[{}] Error performing authentication for HTTP", request.getRemoteAddr(), + authenticationException); } } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java index 0046d28afa444..f6fc42ff0fccf 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java @@ -79,12 +79,16 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti return null; } + String[] additionalServletsList = additionalServlets.split(","); + if (additionalServletsList.length == 0) { + return null; + } + AdditionalServletDefinitions definitions = AdditionalServletUtils.searchForServlets(additionalServletDirectory , narExtractionDirectory); ImmutableMap.Builder builder = ImmutableMap.builder(); - String[] additionalServletsList = additionalServlets.split(","); for (String servletName : additionalServletsList) { AdditionalServletMetadata definition = definitions.servlets().get(servletName); if (null == definition) { @@ -106,7 +110,7 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti } Map servlets = builder.build(); - if (servlets != null && !servlets.isEmpty()) { + if (!servlets.isEmpty()) { return new AdditionalServlets(servlets); } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java 
b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java index d7be7dabd0db1..9cd8160444249 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java @@ -21,6 +21,7 @@ import static org.apache.bookkeeper.feature.SettableFeatureProvider.DISABLE_ALL; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -28,6 +29,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -35,7 +37,11 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import org.apache.bookkeeper.client.DefaultBookieAddressResolver; import org.apache.bookkeeper.client.EnsemblePlacementPolicy; import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; @@ -46,6 +52,7 @@ import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieNode; import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.net.NetworkTopology; import org.apache.bookkeeper.proto.BookieAddressResolver; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; @@ -55,6 +62,8 @@ import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.apache.pulsar.metadata.api.Notification; +import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.bookkeeper.BookieServiceInfoSerde; import org.apache.pulsar.metadata.bookkeeper.PulsarRegistrationClient; import org.awaitility.Awaitility; @@ -254,6 +263,7 @@ public void testWithPulsarRegistrationClient() throws Exception { bkClientConf.getTimeoutTimerNumTicks()); RackawareEnsemblePlacementPolicy repp = new RackawareEnsemblePlacementPolicy(); + mapping.registerRackChangeListener(repp); Class clazz1 = Class.forName("org.apache.bookkeeper.client.TopologyAwareEnsemblePlacementPolicy"); Field field1 = clazz1.getDeclaredField("knownBookies"); field1.setAccessible(true); @@ -323,6 +333,81 @@ public void testWithPulsarRegistrationClient() throws Exception { assertEquals(knownBookies.get(BOOKIE2.toBookieId()).getNetworkLocation(), "/rack1"); assertEquals(knownBookies.get(BOOKIE3.toBookieId()).getNetworkLocation(), "/default-rack"); + //remove bookie2 rack, the bookie2 rack should be /default-rack + data = "{\"group1\": {\"" + BOOKIE1 + + "\": {\"rack\": \"/rack0\", \"hostname\": \"bookie1.example.com\"}}}"; + store.put(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH, data.getBytes(), Optional.empty()).join(); + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> ((BookiesRackConfiguration)field.get(mapping)).get("group1").size() == 1); + + racks = mapping + .resolve(Lists.newArrayList(BOOKIE1.getHostName(), BOOKIE2.getHostName(), 
BOOKIE3.getHostName())) + .stream().filter(Objects::nonNull).toList(); + assertEquals(racks.size(), 1); + assertEquals(racks.get(0), "/rack0"); + assertEquals(knownBookies.size(), 3); + assertEquals(knownBookies.get(BOOKIE1.toBookieId()).getNetworkLocation(), "/rack0"); + assertEquals(knownBookies.get(BOOKIE2.toBookieId()).getNetworkLocation(), "/default-rack"); + assertEquals(knownBookies.get(BOOKIE3.toBookieId()).getNetworkLocation(), "/default-rack"); + timer.stop(); } + + @Test + public void testNoDeadlockWithRackawarePolicy() throws Exception { + ClientConfiguration bkClientConf = new ClientConfiguration(); + bkClientConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); + + BookieRackAffinityMapping mapping = new BookieRackAffinityMapping(); + mapping.setBookieAddressResolver(BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + mapping.setConf(bkClientConf); + + @Cleanup("stop") + HashedWheelTimer timer = new HashedWheelTimer(new ThreadFactoryBuilder().setNameFormat("TestTimer-%d").build(), + bkClientConf.getTimeoutTimerTickDurationMs(), TimeUnit.MILLISECONDS, + bkClientConf.getTimeoutTimerNumTicks()); + + RackawareEnsemblePlacementPolicy repp = new RackawareEnsemblePlacementPolicy(); + repp.initialize(bkClientConf, Optional.of(mapping), timer, + DISABLE_ALL, NullStatsLogger.INSTANCE, BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + repp.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK); + + mapping.registerRackChangeListener(repp); + + @Cleanup("shutdownNow") + ExecutorService executor1 = Executors.newSingleThreadExecutor(); + @Cleanup("shutdownNow") + ExecutorService executor2 = Executors.newSingleThreadExecutor(); + + CountDownLatch count = new CountDownLatch(2); + + executor1.submit(() -> { + try { + Method handleUpdates = + BookieRackAffinityMapping.class.getDeclaredMethod("handleUpdates", Notification.class); + handleUpdates.setAccessible(true); + Notification n = + new Notification(NotificationType.Modified, BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH); + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 2_000) { + handleUpdates.invoke(mapping, n); + } + count.countDown(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + executor2.submit(() -> { + Set writableBookies = new HashSet<>(); + writableBookies.add(BOOKIE1.toBookieId()); + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 2_000) { + repp.onClusterChanged(writableBookies, Collections.emptySet()); + repp.onClusterChanged(Collections.emptySet(), Collections.emptySet()); + } + count.countDown(); + }); + + assertTrue(count.await(3, TimeUnit.SECONDS)); + } } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java index f535ced08f731..beb00197e4e9a 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java @@ -18,11 +18,14 @@ */ package org.apache.pulsar.bookie.rackawareness; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static 
org.testng.Assert.fail; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Sets; import io.netty.util.HashedWheelTimer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -34,18 +37,23 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException; import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.feature.SettableFeatureProvider; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.common.policies.data.BookieInfo; +import org.apache.pulsar.common.policies.data.BookiesRackConfiguration; import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.cache.impl.MetadataCacheImpl; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -114,6 +122,113 @@ public void testNonRegionBookie() throws Exception { assertFalse(ensemble.contains(new BookieSocketAddress(BOOKIE4).toBookieId())); } + @Test + public void testMetadataStoreCases() throws Exception { + Map mainBookieGroup = new HashMap<>(); + mainBookieGroup.put(BOOKIE1, BookieInfo.builder().rack("rack0").build()); + mainBookieGroup.put(BOOKIE2, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup.put(BOOKIE3, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup.put(BOOKIE4, BookieInfo.builder().rack("rack0").build()); + + Map secondaryBookieGroup = new HashMap<>(); + + store = mock(MetadataStoreExtended.class); + MetadataCacheImpl cache = mock(MetadataCacheImpl.class); + when(store.getMetadataCache(BookiesRackConfiguration.class)).thenReturn(cache); + CompletableFuture> initialFuture = new CompletableFuture<>(); + //The initialFuture only has group1. + BookiesRackConfiguration rackConfiguration1 = new BookiesRackConfiguration(); + rackConfiguration1.put("group1", mainBookieGroup); + rackConfiguration1.put("group2", secondaryBookieGroup); + initialFuture.complete(Optional.of(rackConfiguration1)); + + long waitTime = 2000; + CompletableFuture> waitingCompleteFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + //The waitingCompleteFuture has group1 and group2. 
+ BookiesRackConfiguration rackConfiguration2 = new BookiesRackConfiguration(); + Map mainBookieGroup2 = new HashMap<>(); + mainBookieGroup2.put(BOOKIE1, BookieInfo.builder().rack("rack0").build()); + mainBookieGroup2.put(BOOKIE2, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup2.put(BOOKIE4, BookieInfo.builder().rack("rack0").build()); + + Map secondaryBookieGroup2 = new HashMap<>(); + secondaryBookieGroup2.put(BOOKIE3, BookieInfo.builder().rack("rack0").build()); + rackConfiguration2.put("group1", mainBookieGroup2); + rackConfiguration2.put("group2", secondaryBookieGroup2); + waitingCompleteFuture.complete(Optional.of(rackConfiguration2)); + }).start(); + + long longWaitTime = 4000; + CompletableFuture> emptyFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(longWaitTime); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + //The emptyFuture means that the zk node /bookies already be removed. + emptyFuture.complete(Optional.empty()); + }).start(); + + //Return different future means that cache expire. + when(cache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH)) + .thenReturn(initialFuture).thenReturn(initialFuture) + .thenReturn(waitingCompleteFuture).thenReturn(waitingCompleteFuture) + .thenReturn(emptyFuture).thenReturn(emptyFuture); + + IsolatedBookieEnsemblePlacementPolicy isolationPolicy = new IsolatedBookieEnsemblePlacementPolicy(); + ClientConfiguration bkClientConf = new ClientConfiguration(); + bkClientConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); + bkClientConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, isolationGroups); + isolationPolicy.initialize(bkClientConf, Optional.empty(), timer, SettableFeatureProvider.DISABLE_ALL, NullStatsLogger.INSTANCE, BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + isolationPolicy.onClusterChanged(writableBookies, readOnlyBookies); + + MutablePair, Set> groups = new MutablePair<>(); + groups.setLeft(Sets.newHashSet("group1")); + groups.setRight(new HashSet<>()); + + //initialFuture, the future is waiting done. + Set blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + + //waitingCompleteFuture, the future is waiting done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + + Thread.sleep(waitTime); + + //waitingCompleteFuture, the future is already done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertFalse(blacklist.isEmpty()); + assertEquals(blacklist.size(), 1); + BookieId excludeBookie = blacklist.iterator().next(); + assertEquals(excludeBookie.toString(), BOOKIE3); + + //emptyFuture, the future is waiting done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertFalse(blacklist.isEmpty()); + assertEquals(blacklist.size(), 1); + excludeBookie = blacklist.iterator().next(); + assertEquals(excludeBookie.toString(), BOOKIE3); + + Thread.sleep(longWaitTime - waitTime); + + //emptyFuture, the future is already done. 
+ blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + } + @Test public void testBasic() throws Exception { Map> bookieMapping = new HashMap<>(); diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderListTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderListTest.java index df011412fee85..7793a5c029f2a 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderListTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderListTest.java @@ -161,6 +161,30 @@ public void testAuthenticate() throws Exception { testAuthenticate(tokenBB, SUBJECT_B); } + private void testAuthenticateAsync(String token, String expectedSubject) throws Exception { + String actualSubject = authProvider.authenticateAsync(new AuthenticationDataSource() { + @Override + public boolean hasDataFromCommand() { + return true; + } + + @Override + public String getCommandData() { + return token; + } + }).get(); + assertEquals(actualSubject, expectedSubject); + } + + @Test + public void testAuthenticateAsync() throws Exception { + testAuthenticateAsync(tokenAA, SUBJECT_A); + testAuthenticateAsync(tokenAB, SUBJECT_B); + testAuthenticateAsync(tokenBA, SUBJECT_A); + testAuthenticateAsync(tokenBB, SUBJECT_B); + } + + private AuthenticationState newAuthState(String token, String expectedSubject) throws Exception { // Must pass the token to the newAuthState for legacy reasons. AuthenticationState authState = authProvider.newAuthState( diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java index 7e329d14307c7..ed9626dffe23f 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java @@ -24,13 +24,16 @@ import io.jsonwebtoken.Jwts; import io.jsonwebtoken.SignatureAlgorithm; import java.util.Properties; +import java.util.function.Function; +import lombok.Cleanup; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; import org.apache.pulsar.broker.resources.PulsarResources; import org.testng.annotations.Test; - import javax.crypto.SecretKey; +import java.util.Set; import java.util.concurrent.CompletableFuture; public class MultiRolesTokenAuthorizationProviderTest { @@ -43,6 +46,8 @@ public void testMultiRolesAuthz() throws Exception { String token = Jwts.builder().claim("sub", new String[]{userA, userB}).signWith(secretKey).compact(); MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -60,18 +65,18 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + 
assertTrue(provider.authorize("test", ads, role -> { if (role.equals(userB)) { return CompletableFuture.completedFuture(true); // only userB has permission } return CompletableFuture.completedFuture(false); }).get()); - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { return CompletableFuture.completedFuture(true); // all users has permission }).get()); - assertFalse(provider.authorize(ads, role -> { + assertFalse(provider.authorize("test", ads, role -> { return CompletableFuture.completedFuture(false); // all users has no permission }).get()); } @@ -82,6 +87,8 @@ public void testMultiRolesAuthzWithEmptyRoles() throws Exception { String token = Jwts.builder().claim("sub", new String[]{}).signWith(secretKey).compact(); MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -99,7 +106,7 @@ public String getHttpHeader(String name) { } }; - assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + assertFalse(provider.authorize("test", ads, role -> CompletableFuture.completedFuture(false)).get()); } @Test @@ -109,6 +116,8 @@ public void testMultiRolesAuthzWithSingleRole() throws Exception { String token = Jwts.builder().claim("sub", testRole).signWith(secretKey).compact(); MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -126,7 +135,7 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { if (role.equals(testRole)) { return CompletableFuture.completedFuture(true); } @@ -134,11 +143,66 @@ public String getHttpHeader(String name) { }).get()); } + @Test + public void testMultiRolesAuthzWithoutClaim() throws Exception { + final SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + final String testRole = "test-role"; + // broker will use "sub" as the claim by default. 
+ final String token = Jwts.builder() + .claim("whatever", testRole).signWith(secretKey).compact(); + ServiceConfiguration conf = new ServiceConfiguration(); + final MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + provider.initialize(conf, mock(PulsarResources.class)); + final AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + assertFalse(provider.authorize("test", ads, role -> { + if (role == null) { + throw new IllegalStateException("We should avoid pass null to sub providers"); + } + return CompletableFuture.completedFuture(role.equals(testRole)); + }).get()); + } + + @Test + public void testMultiRolesAuthzWithAnonymousUser() throws Exception { + @Cleanup + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + + provider.initialize(conf, mock(PulsarResources.class)); + + Function> authorizeFunc = (String role) -> { + if (role.equals("test-role")) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); + }; + assertTrue(provider.authorize("test-role", null, authorizeFunc).get()); + assertFalse(provider.authorize("test-role-x", null, authorizeFunc).get()); + assertTrue(provider.authorize("test-role", new AuthenticationDataSubscription(null, "test-sub"), authorizeFunc).get()); + } + @Test public void testMultiRolesNotFailNonJWT() throws Exception { String token = "a-non-jwt-token"; MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -156,7 +220,7 @@ public String getHttpHeader(String name) { } }; - assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + assertFalse(provider.authorize("test", ads, role -> CompletableFuture.completedFuture(false)).get()); } @Test @@ -191,11 +255,51 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { if (role.equals(testRole)) { return CompletableFuture.completedFuture(true); } return CompletableFuture.completedFuture(false); }).get()); } + + @Test + public void testMultiRolesAuthzWithSuperUser() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + String testAdminRole = "admin"; + String token = Jwts.builder().claim("sub", testAdminRole).signWith(secretKey).compact(); + + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setSuperUserRoles(Set.of(testAdminRole)); + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + provider.initialize(conf, mock(PulsarResources.class)); + + AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; 
+ + assertTrue(provider.isSuperUser(testAdminRole, ads, conf).get()); + Function> authorizeFunc = (String role) -> { + if (role.equals("admin1")) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); + }; + assertTrue(provider.authorize(testAdminRole, ads, (String role) -> CompletableFuture.completedFuture(false)).get()); + assertTrue(provider.authorize("admin1", null, authorizeFunc).get()); + assertFalse(provider.authorize("admin2", null, authorizeFunc).get()); + } } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java index 85f54a76dc3c4..deb86e1802f6f 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java @@ -18,12 +18,34 @@ */ package org.apache.pulsar.broker.resources; +import static org.apache.pulsar.broker.resources.BaseResources.joinPath; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; + +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; public class NamespaceResourcesTest { + + private MetadataStore localStore; + private MetadataStore configurationStore; + private NamespaceResources namespaceResources; + + private static final String BUNDLE_DATA_BASE_PATH = "/loadbalance/bundle-data"; + + @BeforeMethod + public void setup() { + localStore = mock(MetadataStore.class); + configurationStore = mock(MetadataStore.class); + namespaceResources = new NamespaceResources(localStore, configurationStore, 30); + } + @Test public void test_pathIsFromNamespace() { assertFalse(NamespaceResources.pathIsFromNamespace("/admin/clusters")); @@ -31,4 +53,26 @@ public void test_pathIsFromNamespace() { assertFalse(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant")); assertTrue(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant/my-ns")); } + + /** + * Test that the bundle-data node is deleted from the local stores. + */ + @Test + public void testDeleteBundleDataAsync() { + NamespaceName ns = NamespaceName.get("my-tenant/my-ns"); + String namespaceBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, ns.toString()); + namespaceResources.deleteBundleDataAsync(ns); + + String tenant="my-tenant"; + String tenantBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, tenant); + namespaceResources.deleteBundleDataTenantAsync(tenant); + + verify(localStore).deleteRecursive(namespaceBundlePath); + verify(localStore).deleteRecursive(tenantBundlePath); + + assertThrows(()-> verify(configurationStore).deleteRecursive(namespaceBundlePath)); + assertThrows(()-> verify(configurationStore).deleteRecursive(tenantBundlePath)); + } + + } \ No newline at end of file diff --git a/pulsar-broker/pom.xml b/pulsar-broker/pom.xml index 7442d95e467e6..19165c87ffc39 100644 --- a/pulsar-broker/pom.xml +++ b/pulsar-broker/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-broker jar Pulsar Broker - commons-codec commons-codec - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - org.slf4j slf4j-api - io.netty netty-transport - com.google.protobuf protobuf-java - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-websocket ${project.version} - ${project.groupId} pulsar-client-admin-original ${project.version} - ${project.groupId} managed-ledger ${project.version} - org.apache.curator curator-recipes - org.apache.bookkeeper stream-storage-server @@ -119,234 +105,196 @@ - org.apache.bookkeeper bookkeeper-tools-framework - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-transaction-common ${project.version} - ${project.groupId} pulsar-io-batch-discovery-triggerers ${project.version} test - ${project.groupId} testmocks ${project.version} test - + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + - io.dropwizard.metrics - metrics-core + io.dropwizard.metrics + metrics-core - - org.xerial.snappy - snappy-java + org.xerial.snappy + snappy-java - - ${project.groupId} pulsar-functions-worker ${project.version} - ${project.groupId} pulsar-functions-local-runner-original ${project.version} test - ${project.groupId} pulsar-client-messagecrypto-bc ${project.version} - org.awaitility awaitility test - - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.containers jersey-container-servlet - org.glassfish.jersey.media jersey-media-json-jackson - org.glassfish.jersey.test-framework jersey-test-framework-core test ${jersey.version} - org.glassfish.jersey.test-framework.providers jersey-test-framework-provider-grizzly2 test ${jersey.version} - jakarta.activation jakarta.activation-api - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - com.fasterxml.jackson.module jackson-module-jsonSchema - org.slf4j jcl-over-slf4j - com.google.guava guava - com.beust jcommander - io.swagger swagger-annotations - io.prometheus simpleclient - io.prometheus simpleclient_jetty - io.prometheus simpleclient_hotspot - io.prometheus simpleclient_caffeine - io.swagger swagger-core - org.hdrhistogram HdrHistogram - com.google.code.gson gson - com.github.zafarkhaja java-semver - org.apache.avro avro ${avro.version} - com.carrotsearch hppc - org.roaringbitmap RoaringBitmap - com.github.oshi oshi-core-java11 - ${project.groupId} pulsar-functions-api-examples @@ -354,7 +302,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples-builtin @@ -362,7 +309,6 @@ pom test - ${project.groupId} pulsar-io-batch-data-generator @@ -370,7 +316,6 @@ pom test - ${project.groupId} pulsar-io-data-generator @@ -378,7 +323,6 @@ pom test - ${project.groupId} pulsar-metadata @@ -386,7 +330,6 @@ test-jar test - javax.xml.bind jaxb-api @@ -397,42 +340,33 @@ - com.sun.activation javax.activation - - ${project.groupId} pulsar-transaction-coordinator ${project.version} - - ${project.groupId} pulsar-package-core ${project.version} - io.etcd jetcd-test test - ${project.groupId} pulsar-package-filesystem-storage ${project.version} - - @@ -455,7 +389,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -486,7 +419,6 @@ - maven-dependency-plugin @@ -539,7 +471,6 @@ - org.apache.maven.plugins maven-surefire-plugin @@ 
-555,17 +486,18 @@ - org.xolstice.maven.plugins protobuf-maven-plugin ${protobuf-maven-plugin.version} + com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} true **/ResourceUsage.proto **/TransactionPendingAck.proto + **/DelayedMessageIndexBucketSegment.proto @@ -610,6 +542,7 @@ ${project.basedir}/src/main/proto/TransactionPendingAck.proto ${project.basedir}/src/main/proto/ResourceUsage.proto + ${project.basedir}/src/main/proto/DelayedMessageIndexBucketSegment.proto generated-sources/lightproto/java generated-sources/lightproto/java @@ -622,7 +555,6 @@ - maven-resources-plugin @@ -653,7 +585,6 @@ - @@ -672,7 +603,6 @@ test-jar test - ${project.groupId} pulsar-package-core @@ -680,7 +610,6 @@ test-jar test - ${project.groupId} pulsar-metadata diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java index 0badbda1afdfd..9b757c55ccd1d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarClusterMetadataSetup.java @@ -385,10 +385,19 @@ static void createNamespaceIfAbsent(PulsarResources resources, NamespaceName nam namespaceResources.createPolicies(namespaceName, policies); } else { log.info("Namespace {} already exists.", namespaceName); - namespaceResources.setPolicies(namespaceName, policies -> { - policies.replication_clusters.add(cluster); - return policies; - }); + var replicaClusterFound = false; + var policiesOptional = namespaceResources.getPolicies(namespaceName); + if (policiesOptional.isPresent() && policiesOptional.get().replication_clusters.contains(cluster)) { + replicaClusterFound = true; + } + if (!replicaClusterFound) { + namespaceResources.setPolicies(namespaceName, policies -> { + policies.replication_clusters.add(cluster); + return policies; + }); + log.info("Updated namespace:{} policies. 
Added the replication cluster:{}", + namespaceName, cluster); + } } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java index 0259dfc7a58cf..1674930778d09 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java @@ -220,7 +220,7 @@ static void setDefaultEnsemblePlacementPolicy( } } - private void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfiguration conf, MetadataStore store, + static void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfiguration conf, MetadataStore store, Class policyClass) { bkConf.setEnsemblePlacementPolicy(policyClass); bkConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); @@ -228,6 +228,9 @@ private void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfi bkConf.setProperty(REPP_DNS_RESOLVER_CLASS, conf.getProperties().getProperty(REPP_DNS_RESOLVER_CLASS, BookieRackAffinityMapping.class.getName())); + bkConf.setMinNumRacksPerWriteQuorum(conf.getBookkeeperClientMinNumRacksPerWriteQuorum()); + bkConf.setEnforceMinNumRacksPerWriteQuorum(conf.isBookkeeperClientEnforceMinNumRacksPerWriteQuorum()); + bkConf.setProperty(NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, conf.getProperties().getProperty( NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java index aad7d32f732c5..320d586e76e18 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java @@ -173,7 +173,6 @@ import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreProvider; import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStoreProvider; import org.apache.pulsar.websocket.WebSocketConsumerServlet; -import org.apache.pulsar.websocket.WebSocketPingPongServlet; import org.apache.pulsar.websocket.WebSocketProducerServlet; import org.apache.pulsar.websocket.WebSocketReaderServlet; import org.apache.pulsar.websocket.WebSocketService; @@ -272,6 +271,7 @@ public class PulsarService implements AutoCloseable, ShutdownService { private TransactionPendingAckStoreProvider transactionPendingAckStoreProvider; private final ExecutorProvider transactionExecutorProvider; + private String brokerId; public enum State { Init, Started, Closing, Closed @@ -306,6 +306,7 @@ public PulsarService(ServiceConfiguration config, // Validate correctness of configuration PulsarConfigurationLoader.isComplete(config); TransactionBatchedWriteValidator.validate(config); + this.config = config; // validate `advertisedAddress`, `advertisedListeners`, `internalListenerName` this.advertisedListeners = MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); @@ -316,7 +317,6 @@ public PulsarService(ServiceConfiguration config, // use `internalListenerName` listener as `advertisedAddress` this.bindAddress = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getBindAddress()); this.brokerVersion = PulsarVersion.getVersion(); - this.config = config; this.processTerminator = processTerminator; this.loadManagerExecutor = Executors .newSingleThreadScheduledExecutor(new 
ExecutorProvider.ExtendedThreadFactory("pulsar-load-manager")); @@ -382,6 +382,17 @@ public void closeMetadataServiceSession() throws Exception { localMetadataStore.close(); } + private void closeLeaderElectionService() throws Exception { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { + ExtensibleLoadManagerImpl.get(loadManager.get()).getLeaderElectionService().close(); + } else { + if (this.leaderElectionService != null) { + this.leaderElectionService.close(); + this.leaderElectionService = null; + } + } + } + @Override public void close() throws PulsarServerException { try { @@ -467,6 +478,12 @@ public CompletableFuture closeAsync() { protocolHandlers = null; } + // cancel loadShedding task and shutdown the loadManager executor before shutting down the broker + if (this.loadSheddingTask != null) { + this.loadSheddingTask.cancel(); + } + executorServicesShutdown.shutdown(loadManagerExecutor); + List> asyncCloseFutures = new ArrayList<>(); if (this.brokerService != null) { CompletableFuture brokerCloseFuture = this.brokerService.closeAsync(); @@ -496,16 +513,7 @@ public CompletableFuture closeAsync() { this.bkClientFactory = null; } - if (this.leaderElectionService != null) { - this.leaderElectionService.close(); - this.leaderElectionService = null; - } - - // cancel loadShedding task and shutdown the loadManager executor before shutting down the broker - if (this.loadSheddingTask != null) { - this.loadSheddingTask.cancel(); - } - executorServicesShutdown.shutdown(loadManagerExecutor); + closeLeaderElectionService(); if (adminClient != null) { adminClient.close(); @@ -625,14 +633,18 @@ private synchronized void resetMetricsServlet() { } private CompletableFuture addTimeoutHandling(CompletableFuture future) { + long brokerShutdownTimeoutMs = getConfiguration().getBrokerShutdownTimeoutMs(); + if (brokerShutdownTimeoutMs <= 0) { + return future; + } ScheduledExecutorService shutdownExecutor = Executors.newSingleThreadScheduledExecutor( new ExecutorProvider.ExtendedThreadFactory(getClass().getSimpleName() + "-shutdown")); FutureUtil.addTimeoutHandling(future, - Duration.ofMillis(Math.max(1L, getConfiguration().getBrokerShutdownTimeoutMs())), + Duration.ofMillis(brokerShutdownTimeoutMs), shutdownExecutor, () -> FutureUtil.createTimeoutException("Timeout in close", getClass(), "close")); future.handle((v, t) -> { - if (t != null && getConfiguration().getBrokerShutdownTimeoutMs() > 0) { - LOG.info("Shutdown timed out after {} ms", getConfiguration().getBrokerShutdownTimeoutMs()); + if (t instanceof TimeoutException) { + LOG.info("Shutdown timed out after {} ms", brokerShutdownTimeoutMs); LOG.info(ThreadDumpUtil.buildThreadDiagnosticString()); } // shutdown the shutdown executor @@ -780,7 +792,7 @@ public void start() throws PulsarServerException { exposeTopicMetrics, offloaderScheduler, interval); this.defaultOffloader = createManagedLedgerOffloader(defaultOffloadPolicies); - this.brokerInterceptor = BrokerInterceptors.load(config); + setBrokerInterceptor(newBrokerInterceptor()); // use getter to support mocking getBrokerInterceptor method in tests BrokerInterceptor interceptor = getBrokerInterceptor(); if (interceptor != null) { @@ -809,6 +821,12 @@ public void start() throws PulsarServerException { this.brokerServiceUrl = brokerUrl(config); this.brokerServiceUrlTls = brokerUrlTls(config); + // the broker id is used in the load manager to identify the broker + // it should not be used for making connections to the broker + this.brokerId = + String.format("%s:%s", 
advertisedAddress, config.getWebServicePort() + .or(config::getWebServicePortTls).orElseThrow()); + if (null != this.webSocketService) { ClusterDataImpl clusterData = ClusterDataImpl.builder() @@ -923,6 +941,10 @@ public void start() throws PulsarServerException { } } + protected BrokerInterceptor newBrokerInterceptor() throws IOException { + return BrokerInterceptors.load(config); + } + @VisibleForTesting protected OrderedExecutor newOrderedExecutor() { return OrderedExecutor.newBuilder() @@ -1064,17 +1086,11 @@ private void addWebSocketServiceHandler(WebService webService, new ServletHolder(readerWebSocketServlet), true, attributeMap); webService.addServlet(WebSocketReaderServlet.SERVLET_PATH_V2, new ServletHolder(readerWebSocketServlet), true, attributeMap); - - final WebSocketServlet pingPongWebSocketServlet = new WebSocketPingPongServlet(webSocketService); - webService.addServlet(WebSocketPingPongServlet.SERVLET_PATH, - new ServletHolder(pingPongWebSocketServlet), true, attributeMap); - webService.addServlet(WebSocketPingPongServlet.SERVLET_PATH_V2, - new ServletHolder(pingPongWebSocketServlet), true, attributeMap); } } private void handleDeleteCluster(Notification notification) { - if (ClusterResources.pathRepresentsClusterName(notification.getPath()) + if (isRunning() && ClusterResources.pathRepresentsClusterName(notification.getPath()) && notification.getType() == NotificationType.Deleted) { final String clusterName = ClusterResources.clusterNameFromPath(notification.getPath()); getBrokerService().closeAndRemoveReplicationClient(clusterName); @@ -1112,7 +1128,8 @@ protected void startLeaderElectionService() { LOG.info("The load manager extension is enabled. Skipping PulsarService LeaderElectionService."); return; } - this.leaderElectionService = new LeaderElectionService(coordinationService, getSafeWebServiceAddress(), + this.leaderElectionService = + new LeaderElectionService(coordinationService, getBrokerId(), getSafeWebServiceAddress(), state -> { if (state == LeaderElectionState.Leading) { LOG.info("This broker was elected leader"); @@ -1160,7 +1177,7 @@ protected void startLeaderElectionService() { protected void acquireSLANamespace() { try { // Namespace not created hence no need to unload it - NamespaceName nsName = NamespaceService.getSLAMonitorNamespace(getAdvertisedAddress(), config); + NamespaceName nsName = NamespaceService.getSLAMonitorNamespace(getBrokerId(), config); if (!this.pulsarResources.getNamespaceResources().namespaceExists(nsName)) { LOG.info("SLA Namespace = {} doesn't exist.", nsName); return; @@ -1316,7 +1333,11 @@ public boolean isRunning() { * @return a reference of the current LeaderElectionService instance. */ public LeaderElectionService getLeaderElectionService() { - return this.leaderElectionService; + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { + return ExtensibleLoadManagerImpl.get(loadManager.get()).getLeaderElectionService(); + } else { + return this.leaderElectionService; + } } /** @@ -1721,18 +1742,24 @@ public static String webAddressTls(String host, int port) { } public String getSafeWebServiceAddress() { - return webServiceAddress != null ? webServiceAddress : webServiceAddressTls; + return webServiceAddressTls != null ? webServiceAddressTls : webServiceAddress; } @Deprecated public String getSafeBrokerServiceUrl() { - return brokerServiceUrl != null ? brokerServiceUrl : brokerServiceUrlTls; + return brokerServiceUrlTls != null ? 
brokerServiceUrlTls : brokerServiceUrl; } - public String getLookupServiceAddress() { - return String.format("%s:%s", advertisedAddress, config.getWebServicePort().isPresent() - ? config.getWebServicePort().get() - : config.getWebServicePortTls().get()); + /** + * Return the broker id. The broker id is used in the load manager to uniquely identify the broker at runtime. + * It should not be used for making connections to the broker. The broker id is available after {@link #start()} + * has been called. + * + * @return broker id + */ + public String getBrokerId() { + return Objects.requireNonNull(brokerId, + "brokerId is not initialized before start has been called"); } public TopicPoliciesService getTopicPoliciesService() { @@ -1859,7 +1886,8 @@ public static WorkerConfig initializeWorkerConfigFromBrokerConfig(ServiceConfigu workerConfig.setTlsAllowInsecureConnection(brokerConfig.isTlsAllowInsecureConnection()); workerConfig.setTlsEnabled(brokerConfig.isTlsEnabled()); workerConfig.setTlsEnableHostnameVerification(brokerConfig.isTlsHostnameVerificationEnabled()); - workerConfig.setBrokerClientTrustCertsFilePath(brokerConfig.getTlsTrustCertsFilePath()); + workerConfig.setBrokerClientTrustCertsFilePath(brokerConfig.getBrokerClientTrustCertsFilePath()); + workerConfig.setTlsTrustCertsFilePath(brokerConfig.getTlsTrustCertsFilePath()); // client in worker will use this config to authenticate with broker workerConfig.setBrokerClientAuthenticationPlugin(brokerConfig.getBrokerClientAuthenticationPlugin()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java index 3e3b044ec51b8..35aa7cc2fdd26 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java @@ -169,7 +169,8 @@ public CompletableFuture handleTcClientConnect(TransactionCoordinatorID tc tcLoadSemaphore.release(); })).exceptionally(e -> { internalPinnedExecutor.execute(() -> { - completableFuture.completeExceptionally(e.getCause()); + Throwable realCause = FutureUtil.unwrapCompletionException(e); + completableFuture.completeExceptionally(realCause); // release before handle request queue, //in order to client reconnect infinite loop tcLoadSemaphore.release(); @@ -180,7 +181,7 @@ public CompletableFuture handleTcClientConnect(TransactionCoordinatorID tc CompletableFuture future = deque.poll(); if (future != null) { // this means that this tc client connection connect fail - future.completeExceptionally(e); + future.completeExceptionally(realCause); } else { break; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java index 3328fa9715b99..57650758bbc70 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java @@ -26,6 +26,7 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -34,6 +35,7 @@ import java.util.Objects; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeoutException; 
import java.util.stream.Collectors; import javax.ws.rs.DELETE; import javax.ws.rs.DefaultValue; @@ -81,12 +83,18 @@ public class BrokersBase extends AdminResource { // log a full thread dump when a deadlock is detected in healthcheck once every 10 minutes // to prevent excessive logging private static final long LOG_THREADDUMP_INTERVAL_WHEN_DEADLOCK_DETECTED = 600000L; + // there is a timeout of 60 seconds default in the client(readTimeoutMs), so we need to set the timeout + // a bit shorter than 60 seconds to avoid the client timeout exception thrown before the server timeout exception. + // or we can't propagate the server timeout exception to the client. + private static final Duration HEALTH_CHECK_READ_TIMEOUT = Duration.ofSeconds(58); + private static final TimeoutException HEALTH_CHECK_TIMEOUT_EXCEPTION = + FutureUtil.createTimeoutException("Timeout", BrokersBase.class, "healthCheckRecursiveReadNext(...)"); private volatile long threadDumpLoggedTimestamp; @GET @Path("/{cluster}") @ApiOperation( - value = "Get the list of active brokers (web service addresses) in the cluster." + value = "Get the list of active brokers (broker ids) in the cluster." + "If authorization is not enabled, any cluster name is valid.", response = String.class, responseContainer = "Set") @@ -116,7 +124,7 @@ public void getActiveBrokers(@Suspended final AsyncResponse asyncResponse, @GET @ApiOperation( - value = "Get the list of active brokers (web service addresses) in the local cluster." + value = "Get the list of active brokers (broker ids) in the local cluster." + "If authorization is not enabled", response = String.class, responseContainer = "Set") @@ -142,7 +150,9 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { validateSuperUserAccessAsync().thenAccept(__ -> { LeaderBroker leaderBroker = pulsar().getLeaderElectionService().getCurrentLeader() .orElseThrow(() -> new RestException(Status.NOT_FOUND, "Couldn't find leader broker")); - BrokerInfo brokerInfo = BrokerInfo.builder().serviceUrl(leaderBroker.getServiceUrl()).build(); + BrokerInfo brokerInfo = BrokerInfo.builder() + .serviceUrl(leaderBroker.getServiceUrl()) + .brokerId(leaderBroker.getBrokerId()).build(); LOG.info("[{}] Successfully to get the information of the leader broker.", clientAppId()); asyncResponse.resume(brokerInfo); }) @@ -154,8 +164,8 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { } @GET - @Path("/{clusterName}/{broker-webserviceurl}/ownedNamespaces") - @ApiOperation(value = "Get the list of namespaces served by the specific broker", + @Path("/{clusterName}/{brokerId}/ownedNamespaces") + @ApiOperation(value = "Get the list of namespaces served by the specific broker id", response = NamespaceOwnershipStatus.class, responseContainer = "Map") @ApiResponses(value = { @ApiResponse(code = 307, message = "Current broker doesn't serve the cluster"), @@ -163,9 +173,9 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { @ApiResponse(code = 404, message = "Cluster doesn't exist") }) public void getOwnedNamespaces(@Suspended final AsyncResponse asyncResponse, @PathParam("clusterName") String cluster, - @PathParam("broker-webserviceurl") String broker) { + @PathParam("brokerId") String brokerId) { validateSuperUserAccessAsync() - .thenAccept(__ -> validateBrokerName(broker)) + .thenCompose(__ -> maybeRedirectToBroker(brokerId)) .thenCompose(__ -> validateClusterOwnershipAsync(cluster)) .thenCompose(__ -> 
pulsar().getNamespaceService().getOwnedNameSpacesStatusAsync()) .thenAccept(asyncResponse::resume) @@ -173,7 +183,7 @@ public void getOwnedNamespaces(@Suspended final AsyncResponse asyncResponse, // If the exception is not redirect exception we need to log it. if (!isRedirectException(ex)) { LOG.error("[{}] Failed to get the namespace ownership status. cluster={}, broker={}", - clientAppId(), cluster, broker); + clientAppId(), cluster, brokerId); } resumeAsyncResponseExceptionally(asyncResponse, ex); return null; @@ -397,9 +407,10 @@ private void checkDeadlockedThreads() { private CompletableFuture internalRunHealthCheck(TopicVersion topicVersion) { + String brokerId = pulsar().getBrokerId(); NamespaceName namespaceName = (topicVersion == TopicVersion.V2) - ? NamespaceService.getHeartbeatNamespaceV2(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()) - : NamespaceService.getHeartbeatNamespace(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()); + ? NamespaceService.getHeartbeatNamespaceV2(brokerId, pulsar().getConfiguration()) + : NamespaceService.getHeartbeatNamespace(brokerId, pulsar().getConfiguration()); final String topicName = String.format("persistent://%s/%s", namespaceName, HEALTH_CHECK_TOPIC_SUFFIX); LOG.info("[{}] Running healthCheck with topic={}", clientAppId(), topicName); final String messageStr = UUID.randomUUID().toString(); @@ -432,7 +443,10 @@ private CompletableFuture internalRunHealthCheck(TopicVersion topicVersion }); throw FutureUtil.wrapToCompletionException(createException); }).thenCompose(reader -> producer.sendAsync(messageStr) - .thenCompose(__ -> healthCheckRecursiveReadNext(reader, messageStr)) + .thenCompose(__ -> FutureUtil.addTimeoutHandling( + healthCheckRecursiveReadNext(reader, messageStr), + HEALTH_CHECK_READ_TIMEOUT, pulsar().getBrokerService().executor(), + () -> HEALTH_CHECK_TIMEOUT_EXCEPTION)) .whenComplete((__, ex) -> { closeAndReCheck(producer, reader, topicOptional.get(), subscriptionName) .whenComplete((unused, innerEx) -> { @@ -546,16 +560,26 @@ public void shutDownBrokerGracefully( @ApiParam(name = "maxConcurrentUnloadPerSec", value = "if the value absent(value=0) means no concurrent limitation.") @QueryParam("maxConcurrentUnloadPerSec") int maxConcurrentUnloadPerSec, - @QueryParam("forcedTerminateTopic") @DefaultValue("true") boolean forcedTerminateTopic + @QueryParam("forcedTerminateTopic") @DefaultValue("true") boolean forcedTerminateTopic, + @Suspended final AsyncResponse asyncResponse ) { validateSuperUserAccess(); - doShutDownBrokerGracefully(maxConcurrentUnloadPerSec, forcedTerminateTopic); + doShutDownBrokerGracefullyAsync(maxConcurrentUnloadPerSec, forcedTerminateTopic) + .thenAccept(__ -> { + LOG.info("[{}] Successfully shutdown broker gracefully", clientAppId()); + asyncResponse.resume(Response.noContent().build()); + }) + .exceptionally(ex -> { + LOG.error("[{}] Failed to shutdown broker gracefully", clientAppId(), ex); + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); } - private void doShutDownBrokerGracefully(int maxConcurrentUnloadPerSec, - boolean forcedTerminateTopic) { + private CompletableFuture doShutDownBrokerGracefullyAsync(int maxConcurrentUnloadPerSec, + boolean forcedTerminateTopic) { pulsar().getBrokerService().unloadNamespaceBundlesGracefully(maxConcurrentUnloadPerSec, forcedTerminateTopic); - pulsar().closeAsync(); + return pulsar().closeAsync(); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/ClustersBase.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/ClustersBase.java index e61a7f20d1d67..5d4ed54c33466 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/ClustersBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/ClustersBase.java @@ -162,6 +162,9 @@ public void createCluster( .thenCompose(__ -> validatePoliciesReadOnlyAccessAsync()) .thenCompose(__ -> { NamedEntity.checkName(cluster); + if (clusterData == null) { + throw new RestException(Status.BAD_REQUEST, "cluster data is required"); + } try { clusterData.checkPropertiesIfPresent(); } catch (IllegalArgumentException ex) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java index 19f8d7c437ded..46b1712e7dd02 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java @@ -234,14 +234,14 @@ private void internalRetryableDeleteNamespaceAsync0(boolean force, int retryTime })) .thenCompose(topics -> { List allTopics = topics.get(0); - ArrayList allUserCreatedTopics = new ArrayList<>(); + Set allUserCreatedTopics = new HashSet<>(); List allPartitionedTopics = topics.get(1); - ArrayList allUserCreatedPartitionTopics = new ArrayList<>(); + Set allUserCreatedPartitionTopics = new HashSet<>(); boolean hasNonSystemTopic = false; - List allSystemTopics = new ArrayList<>(); - List allPartitionedSystemTopics = new ArrayList<>(); - List topicPolicy = new ArrayList<>(); - List partitionedTopicPolicy = new ArrayList<>(); + Set allSystemTopics = new HashSet<>(); + Set allPartitionedSystemTopics = new HashSet<>(); + Set topicPolicy = new HashSet<>(); + Set partitionedTopicPolicy = new HashSet<>(); for (String topic : allTopics) { if (!pulsar().getBrokerService().isSystemTopic(TopicName.get(topic))) { hasNonSystemTopic = true; @@ -349,7 +349,7 @@ private boolean isDeletedAlongWithUserCreatedTopic(String topic) { return topic.endsWith(SystemTopicNames.PENDING_ACK_STORE_SUFFIX); } - private CompletableFuture internalDeletePartitionedTopicsAsync(List topicNames) { + private CompletableFuture internalDeletePartitionedTopicsAsync(Set topicNames) { if (CollectionUtils.isEmpty(topicNames)) { return CompletableFuture.completedFuture(null); } @@ -363,7 +363,7 @@ private CompletableFuture internalDeletePartitionedTopicsAsync(List internalDeleteTopicsAsync(List topicNames) { + private CompletableFuture internalDeleteTopicsAsync(Set topicNames) { if (CollectionUtils.isEmpty(topicNames)) { return CompletableFuture.completedFuture(null); } @@ -691,7 +691,9 @@ protected CompletableFuture internalSetNamespaceReplicationClusters(List validatePoliciesReadOnlyAccessAsync()) .thenApply(__ -> { - checkNotNull(clusterIds, "ClusterIds should not be null"); + if (CollectionUtils.isEmpty(clusterIds)) { + throw new RestException(Status.PRECONDITION_FAILED, "ClusterIds should not be null or empty"); + } if (!namespaceName.isGlobal() && !(clusterIds.size() == 1 && clusterIds.get(0).equals(pulsar().getConfiguration().getClusterName()))) { throw new RestException(Status.PRECONDITION_FAILED, @@ -883,7 +885,7 @@ protected void internalSetBookieAffinityGroup(BookieAffinityGroupData bookieAffi policies -> new LocalPolicies(policies.bundles, bookieAffinityGroup, policies.namespaceAntiAffinityGroup)) - .orElseGet(() -> new LocalPolicies(defaultBundle(), + .orElseGet(() 
-> new LocalPolicies(getBundles(config().getDefaultNumberOfNamespaceBundles()), bookieAffinityGroup, null)); log.info("[{}] Successfully updated local-policies configuration: namespace={}, map={}", clientAppId(), @@ -944,9 +946,9 @@ private CompletableFuture validateLeaderBrokerAsync() { return FutureUtil.failedFuture(new RestException(Response.Status.PRECONDITION_FAILED, errorStr)); } LeaderBroker leaderBroker = pulsar().getLeaderElectionService().getCurrentLeader().get(); - String leaderBrokerUrl = leaderBroker.getServiceUrl(); + String leaderBrokerId = leaderBroker.getBrokerId(); return pulsar().getNamespaceService() - .createLookupResult(leaderBrokerUrl, false, null) + .createLookupResult(leaderBrokerId, false, null) .thenCompose(lookupResult -> { String redirectUrl = isRequestHttps() ? lookupResult.getLookupData().getHttpUrlTls() : lookupResult.getLookupData().getHttpUrl(); @@ -969,7 +971,7 @@ private CompletableFuture validateLeaderBrokerAsync() { return FutureUtil.failedFuture(( new WebApplicationException(Response.temporaryRedirect(redirect).build()))); } catch (MalformedURLException exception) { - log.error("The leader broker url is malformed - {}", leaderBrokerUrl); + log.error("The redirect url is malformed - {}", redirectUrl); return FutureUtil.failedFuture(new RestException(exception)); } }); @@ -1005,8 +1007,11 @@ public CompletableFuture setNamespaceBundleAffinityAsync(String bundleRang } public CompletableFuture internalUnloadNamespaceBundleAsync(String bundleRange, - String destinationBroker, + String destinationBrokerParam, boolean authoritative) { + String destinationBroker = StringUtils.isBlank(destinationBrokerParam) ? null : + // ensure backward compatibility: strip the possible http:// or https:// prefix + destinationBrokerParam.replaceFirst("http[s]?://", ""); return validateSuperUserAccessAsync() .thenCompose(__ -> setNamespaceBundleAffinityAsync(bundleRange, destinationBroker)) .thenAccept(__ -> { @@ -1347,6 +1352,7 @@ protected CompletableFuture setBacklogQuotaAsync(BacklogQuotaType backlogQ "Backlog Quota exceeds configured retention quota for namespace." 
+ " Please increase retention quota and retry"); } + policies.backlog_quota_map.put(quotaType, quota); return policies; }); } @@ -1710,7 +1716,8 @@ protected String internalGetNamespaceAntiAffinityGroup() { try { return getLocalPolicies() .getLocalPolicies(namespaceName) - .orElse(new LocalPolicies()).namespaceAntiAffinityGroup; + .orElseGet(() -> new LocalPolicies(getBundles(config().getDefaultNumberOfNamespaceBundles()) + , null, null)).namespaceAntiAffinityGroup; } catch (Exception e) { log.error("[{}] Failed to get the antiAffinityGroup of namespace {}", clientAppId(), namespaceName, e); throw new RestException(Status.NOT_FOUND, "Couldn't find namespace policies"); @@ -1760,7 +1767,9 @@ protected List internalGetAntiAffinityNamespaces(String cluster, String throw new RuntimeException(e); } - String storedAntiAffinityGroup = policies.orElse(new LocalPolicies()).namespaceAntiAffinityGroup; + String storedAntiAffinityGroup = policies.orElseGet(() -> + new LocalPolicies(getBundles(config().getDefaultNumberOfNamespaceBundles()), + null, null)).namespaceAntiAffinityGroup; return antiAffinityGroup.equalsIgnoreCase(storedAntiAffinityGroup); }).collect(Collectors.toList()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java index 7347d6dbf20a2..62493fdafa96a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.broker.admin.impl; +import static org.apache.pulsar.common.naming.SystemTopicNames.isSystemTopic; import static org.apache.pulsar.common.naming.SystemTopicNames.isTransactionCoordinatorAssign; import static org.apache.pulsar.common.naming.SystemTopicNames.isTransactionInternalName; +import static org.apache.pulsar.common.naming.TopicName.PARTITIONED_TOPIC_SUFFIX; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectReader; import com.github.zafarkhaja.semver.Version; @@ -144,6 +146,7 @@ import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.compaction.Compactor; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.slf4j.Logger; @@ -697,7 +700,11 @@ private CompletableFuture internalUpdateNonPartitionedTopicProperties(Map< @Override public void updatePropertiesComplete(Map properties, Object ctx) { + if (managedLedger.getConfig().getProperties() == null) { + managedLedger.getConfig().setProperties(new HashMap<>()); + } managedLedger.getConfig().getProperties().putAll(properties); + future.complete(null); } @@ -788,7 +795,9 @@ protected void internalDeletePartitionedTopic(AsyncResponse asyncResponse, .thenCompose(unused -> internalRemovePartitionsTopicAsync(numPartitions, force)); }) // Only tries to delete the znode for partitioned topic when all its partitions are successfully deleted - ).thenCompose(__ -> getPulsarResources().getNamespaceResources().getPartitionedTopicResources() + ).thenCompose(ignore -> + pulsar().getBrokerService().deleteSchema(topicName).exceptionally(ex -> null)) + .thenCompose(__ -> 
getPulsarResources().getNamespaceResources().getPartitionedTopicResources() .runWithMarkDeleteAsync(topicName, () -> namespaceResources() .getPartitionedTopicResources().deletePartitionedTopicAsync(topicName))) .thenAccept(__ -> { @@ -1171,6 +1180,21 @@ protected CompletableFuture internalDeleteTopicAsync(boolean authoritative .thenCompose(__ -> pulsar().getBrokerService().deleteTopic(topicName.toString(), force)); } + /** + * There has a known bug will make Pulsar misidentifies "tp-partition-0-DLQ-partition-0" as "tp-partition-0-DLQ". + * You can see the details from PR https://github.com/apache/pulsar/pull/19841. + * This method is a quick fix and will be removed in master branch after #19841 and PIP 263 are done. + */ + private boolean isUnexpectedTopicName(PartitionedTopicMetadata topicMetadata) { + if (!topicName.toString().contains(PARTITIONED_TOPIC_SUFFIX)){ + return false; + } + if (topicMetadata.partitions <= 0){ + return false; + } + return topicName.getPartition(0).toString().equals(topicName.toString()); + } + protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean authoritative) { CompletableFuture future; if (topicName.isGlobal()) { @@ -1188,7 +1212,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut } else { getPartitionedTopicMetadataAsync(topicName, authoritative, false) .thenAccept(partitionMetadata -> { - if (partitionMetadata.partitions > 0) { + if (partitionMetadata.partitions > 0 && !isUnexpectedTopicName(partitionMetadata)) { try { final Set subscriptions = Collections.newSetFromMap( @@ -1610,7 +1634,9 @@ protected CompletableFuture internalDeleteSubscriptionAsync(String subName future = CompletableFuture.completedFuture(null); } - return future.thenCompose(__ -> { + return future + .thenCompose((__) -> validateTopicOperationAsync(topicName, TopicOperation.UNSUBSCRIBE, subName)) + .thenCompose(__ -> { if (topicName.isPartitioned()) { return internalDeleteSubscriptionForNonPartitionedTopicAsync(subName, authoritative, force); } else { @@ -1653,11 +1679,11 @@ protected CompletableFuture internalDeleteSubscriptionAsync(String subName }); } + // Note: this method expects the caller to check authorization private CompletableFuture internalDeleteSubscriptionForNonPartitionedTopicAsync(String subName, boolean authoritative, boolean force) { return validateTopicOwnershipAsync(topicName, authoritative) - .thenCompose((__) -> validateTopicOperationAsync(topicName, TopicOperation.UNSUBSCRIBE, subName)) .thenCompose(__ -> getTopicReferenceAsync(topicName)) .thenCompose((topic) -> { Subscription sub = topic.getSubscription(subName); @@ -2154,10 +2180,9 @@ private void internalExpireMessagesForAllSubscriptionsForNonPartitionedTopic(Asy final List> futures = new ArrayList<>((int) topic.getReplicators().size()); List subNames = - new ArrayList<>((int) topic.getReplicators().size() - + (int) topic.getSubscriptions().size()); - subNames.addAll(topic.getReplicators().keys()); - subNames.addAll(topic.getSubscriptions().keys()); + new ArrayList<>((int) topic.getSubscriptions().size()); + subNames.addAll(topic.getSubscriptions().keys().stream().filter( + subName -> !subName.equals(Compactor.COMPACTION_SUBSCRIPTION)).toList()); for (int i = 0; i < subNames.size(); i++) { try { futures.add(internalExpireMessagesByTimestampForSinglePartitionAsync(partitionMetadata, @@ -2324,7 +2349,7 @@ protected void internalCreateSubscription(AsyncResponse asyncResponse, String su .thenCompose(allowAutoTopicCreation -> 
getPartitionedTopicMetadataAsync(topicName, authoritative, allowAutoTopicCreation).thenAccept(partitionMetadata -> { final int numPartitions = partitionMetadata.partitions; - if (numPartitions > 0) { + if (partitionMetadata.partitions > 0 && !isUnexpectedTopicName(partitionMetadata)) { final CompletableFuture future = new CompletableFuture<>(); final AtomicInteger count = new AtomicInteger(numPartitions); final AtomicInteger failureCount = new AtomicInteger(0); @@ -2801,6 +2826,12 @@ public void readEntryComplete(Entry entry, Object ctx) { } } } + + @Override + public String toString() { + return String.format("Topic [%s] get entry batch size", + PersistentTopicsBase.this.topicName); + } }, null); } catch (NullPointerException npe) { batchSizeFuture.completeExceptionally(new RestException(Status.NOT_FOUND, "Message not found")); @@ -2884,6 +2915,9 @@ protected CompletableFuture internalGetMessageById(long ledgerId, long @Override public void readEntryFailed(ManagedLedgerException exception, Object ctx) { + if (exception instanceof ManagedLedgerException.LedgerNotExistException) { + throw new RestException(Status.NOT_FOUND, "Message id not found"); + } throw new RestException(exception); } @@ -2899,6 +2933,12 @@ public void readEntryComplete(Entry entry, Object ctx) { } } } + + @Override + public String toString() { + return String.format("Topic [%s] internal get message by id", + PersistentTopicsBase.this.topicName); + } }, null); return results; }); @@ -3045,6 +3085,10 @@ protected CompletableFuture internalExamineMessageAsync(String initial try { PersistentTopic persistentTopic = (PersistentTopic) topic; long totalMessage = persistentTopic.getNumberOfEntries(); + if (totalMessage <= 0) { + throw new RestException(Status.PRECONDITION_FAILED, + "Could not examine messages due to the total message is zero"); + } PositionImpl startPosition = persistentTopic.getFirstPosition(); long messageToSkip = initialPositionLocal.equals("earliest") ? 
messagePositionLocal : @@ -3061,6 +3105,12 @@ public void readEntryComplete(Entry entry, Object ctx) { public void readEntryFailed(ManagedLedgerException exception, Object ctx) { future.completeExceptionally(exception); } + + @Override + public String toString() { + return String.format("Topic [%s] internal examine message async", + PersistentTopicsBase.this.topicName); + } }, null); return future; } catch (ManagedLedgerException exception) { @@ -3400,6 +3450,9 @@ protected CompletableFuture internalSetReplicationClusters(List cl return validateTopicPolicyOperationAsync(topicName, PolicyName.REPLICATION, PolicyOperation.WRITE) .thenCompose(__ -> validatePoliciesReadOnlyAccessAsync()) .thenCompose(__ -> { + if (CollectionUtils.isEmpty(clusterIds)) { + throw new RestException(Status.PRECONDITION_FAILED, "ClusterIds should not be null or empty"); + } Set replicationClusters = Sets.newHashSet(clusterIds); if (replicationClusters.contains("global")) { throw new RestException(Status.PRECONDITION_FAILED, @@ -3716,7 +3769,7 @@ protected CompletableFuture preValidation(boolean authoritative) { .thenCompose(metadata -> { if (metadata.partitions > 0) { return validateTopicOwnershipAsync(TopicName.get(topicName.toString() - + TopicName.PARTITIONED_TOPIC_SUFFIX + 0), authoritative); + + PARTITIONED_TOPIC_SUFFIX + 0), authoritative); } else { return validateTopicOwnershipAsync(topicName, authoritative); } @@ -4388,8 +4441,13 @@ public CompletableFuture getPartitionedTopicMetadata( // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can // serve/redirect request else fail partitioned-metadata-request so, client fails while creating // producer/consumer + // It is necessary for system topic operations because system topics are used to store metadata + // and other vital information. Even after namespace starting deletion,, + // we need to access the metadata of system topics to create readers and clean up topic data. + // If we don't do this, it can prevent namespace deletion due to inaccessible readers. authorizationFuture.thenCompose(__ -> - checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject())) + checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject(), + SystemTopicNames.isSystemTopic(topicName))) .thenCompose(res -> pulsar.getBrokerService().fetchPartitionedTopicMetadataCheckAllowAutoCreationAsync(topicName)) .thenAccept(metadata -> { @@ -4416,7 +4474,11 @@ public static CompletableFuture unsafeGetPartitionedTo // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can // serve/redirect request else fail partitioned-metadata-request so, client fails while creating // producer/consumer - checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject()) + // It is necessary for system topic operations because system topics are used to store metadata + // and other vital information. Even after namespace starting deletion,, + // we need to access the metadata of system topics to create readers and clean up topic data. + // If we don't do this, it can prevent namespace deletion due to inaccessible readers. 
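(Several anonymous read-entry callbacks in the hunks above now override toString() so the owning topic is identifiable when a pending asynchronous operation is logged or times out. A small sketch of the same pattern follows; AsyncCallback and the surrounding names are illustrative, not the BookKeeper callback interfaces.)

class CallbackToStringExample {
    interface AsyncCallback {
        void done(Object result);
    }

    static AsyncCallback newCallback(String topicName) {
        return new AsyncCallback() {
            @Override
            public void done(Object result) {
                // process the read entry here
            }

            @Override
            public String toString() {
                // identifies which topic the pending callback belongs to in diagnostics
                return String.format("Topic [%s] get entry batch size", topicName);
            }
        };
    }

    public static void main(String[] args) {
        System.out.println(newCallback("persistent://public/default/my-topic"));
    }
}
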
+ checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject(), isSystemTopic(topicName)) .thenCompose(res -> pulsar.getBrokerService() .fetchPartitionedTopicMetadataCheckAllowAutoCreationAsync(topicName)) .thenAccept(metadata -> { @@ -4543,7 +4605,7 @@ protected CompletableFuture internalValidateClientVersionAsync() { private CompletableFuture validatePartitionTopicUpdateAsync(String topicName, int numberOfPartition) { return internalGetListAsync().thenCompose(existingTopicList -> { TopicName partitionTopicName = TopicName.get(domain(), namespaceName, topicName); - String prefix = partitionTopicName.getPartitionedTopicName() + TopicName.PARTITIONED_TOPIC_SUFFIX; + String prefix = partitionTopicName.getPartitionedTopicName() + PARTITIONED_TOPIC_SUFFIX; return getPartitionedTopicMetadataAsync(partitionTopicName, false, false) .thenAccept(metadata -> { int oldPartition = metadata.partitions; @@ -4551,8 +4613,8 @@ private CompletableFuture validatePartitionTopicUpdateAsync(String topicNa if (existingTopicName.startsWith(prefix)) { try { long suffix = Long.parseLong(existingTopicName.substring( - existingTopicName.indexOf(TopicName.PARTITIONED_TOPIC_SUFFIX) - + TopicName.PARTITIONED_TOPIC_SUFFIX.length())); + existingTopicName.indexOf(PARTITIONED_TOPIC_SUFFIX) + + PARTITIONED_TOPIC_SUFFIX.length())); // Skip partition of partitioned topic by making sure // the numeric suffix greater than old partition number. if (suffix >= oldPartition && suffix <= (long) numberOfPartition) { @@ -4593,12 +4655,12 @@ private CompletableFuture validatePartitionTopicUpdateAsync(String topicNa */ private CompletableFuture validateNonPartitionTopicNameAsync(String topicName) { CompletableFuture ret = CompletableFuture.completedFuture(null); - if (topicName.contains(TopicName.PARTITIONED_TOPIC_SUFFIX)) { + if (topicName.contains(PARTITIONED_TOPIC_SUFFIX)) { try { // First check if what's after suffix "-partition-" is number or not, if not number then can create. 
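(The validation hunks above hinge on whether the text after the "-partition-" suffix parses as a number; only then is the name treated as a partition of a partitioned topic. A self-contained sketch of that check is below, assuming the suffix literal is "-partition-"; everything else is illustrative.)

class PartitionSuffixExample {
    static final String PARTITIONED_TOPIC_SUFFIX = "-partition-";

    // Returns the partition index if the name ends with "-partition-<number>", otherwise -1.
    static long partitionIndexOf(String topicName) {
        int idx = topicName.indexOf(PARTITIONED_TOPIC_SUFFIX);
        if (idx < 0) {
            return -1;
        }
        try {
            return Long.parseLong(topicName.substring(idx + PARTITIONED_TOPIC_SUFFIX.length()));
        } catch (NumberFormatException e) {
            // non-numeric suffix, so this is an ordinary topic name and may be created
            return -1;
        }
    }

    public static void main(String[] args) {
        System.out.println(partitionIndexOf("my-topic-partition-3"));   // 3
        System.out.println(partitionIndexOf("my-topic-partition-abc")); // -1
    }
}
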
- int partitionIndex = topicName.indexOf(TopicName.PARTITIONED_TOPIC_SUFFIX); + int partitionIndex = topicName.indexOf(PARTITIONED_TOPIC_SUFFIX); long suffix = Long.parseLong(topicName.substring(partitionIndex - + TopicName.PARTITIONED_TOPIC_SUFFIX.length())); + + PARTITIONED_TOPIC_SUFFIX.length())); TopicName partitionTopicName = TopicName.get(domain(), namespaceName, topicName.substring(0, partitionIndex)); ret = getPartitionedTopicMetadataAsync(partitionTopicName, false, false) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java index 91a8140fde12b..454b8f0fac82c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java @@ -34,6 +34,7 @@ import org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; import org.apache.pulsar.broker.service.schema.SchemaRegistryService; import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.client.impl.schema.SchemaUtils; import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; @@ -114,10 +115,6 @@ public CompletableFuture deleteSchemaAsync(boolean authoritative, } public CompletableFuture postSchemaAsync(PostSchemaPayload payload, boolean authoritative) { - if (SchemaType.BYTES.name().equals(payload.getType())) { - return CompletableFuture.failedFuture(new RestException(Response.Status.NOT_ACCEPTABLE, - "Do not upload a BYTES schema, because it's the default schema type")); - } return validateOwnershipAndOperationAsync(authoritative, TopicOperation.PRODUCE) .thenCompose(__ -> getSchemaCompatibilityStrategyAsyncWithoutAuth()) .thenCompose(schemaCompatibilityStrategy -> { @@ -150,8 +147,13 @@ public CompletableFuture> testCompati .thenCompose(__ -> getSchemaCompatibilityStrategyAsync()) .thenCompose(strategy -> { String schemaId = getSchemaId(); + final SchemaType schemaType = SchemaType.valueOf(payload.getType()); + byte[] data = payload.getSchema().getBytes(StandardCharsets.UTF_8); + if (schemaType.getValue() == SchemaType.KEY_VALUE.getValue()) { + data = SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema(data); + } return pulsar().getSchemaRegistryService().isCompatible(schemaId, - SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) + SchemaData.builder().data(data) .isDeleted(false) .timestamp(clock.millis()).type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) @@ -165,10 +167,14 @@ public CompletableFuture getVersionBySchemaAsync(PostSchemaPayload payload return validateOwnershipAndOperationAsync(authoritative, TopicOperation.GET_METADATA) .thenCompose(__ -> { String schemaId = getSchemaId(); + final SchemaType schemaType = SchemaType.valueOf(payload.getType()); + byte[] data = payload.getSchema().getBytes(StandardCharsets.UTF_8); + if (schemaType.getValue() == SchemaType.KEY_VALUE.getValue()) { + data = SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema(data); + } return pulsar().getSchemaRegistryService() .findSchemaVersion(schemaId, - SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) - .isDeleted(false).timestamp(clock.millis()) + SchemaData.builder().data(data).isDeleted(false).timestamp(clock.millis()) 
.type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) .props(payload.getProperties()).build()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java index d596cbdd39db9..b322cc824f65f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java @@ -436,11 +436,13 @@ protected CompletableFuture getExistingPersistentTopicAsync(boo CompletableFuture> topicFuture = pulsar().getBrokerService() .getTopics().get(topicName.toString()); if (topicFuture == null) { - return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); + return FutureUtil.failedFuture(new RestException(NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } return topicFuture.thenCompose(optionalTopic -> { if (!optionalTopic.isPresent()) { - return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); + return FutureUtil.failedFuture(new RestException(NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } return CompletableFuture.completedFuture((PersistentTopic) optionalTopic.get()); }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java index 0d857f2211f41..1c1dd74719641 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java @@ -284,11 +284,12 @@ public List getListFromBundle(@PathParam("property") String property, @P } } - private Topic getTopicReference(TopicName topicName) { + private Topic getTopicReference(final TopicName topicName) { try { return pulsar().getBrokerService().getTopicIfExists(topicName.toString()) .get(config().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS) - .orElseThrow(() -> new RestException(Status.NOT_FOUND, "Topic not found")); + .orElseThrow(() -> new RestException(Status.NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } catch (ExecutionException e) { throw new RuntimeException(e.getCause()); } catch (InterruptedException | TimeoutException e) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java index cc269d02831d8..e82ba2267fe09 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java @@ -51,7 +51,6 @@ import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.web.RestException; -import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.EntryFilters; import org.apache.pulsar.common.policies.data.NamespaceOperation; @@ -230,7 +229,8 @@ public void getPartitionedStats( getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenAccept(partitionMetadata -> { if (partitionMetadata.partitions == 0) { - asyncResponse.resume(new RestException(Status.NOT_FOUND, "Partitioned Topic 
not found")); + asyncResponse.resume(new RestException(Status.NOT_FOUND, + String.format("Partitioned topic not found %s", topicName.toString()))); return; } NonPersistentPartitionedTopicStatsImpl stats = @@ -445,44 +445,39 @@ public void getListFromBundle( bundleRange); asyncResponse.resume(Response.noContent().build()); } else { - NamespaceBundle nsBundle; - try { - nsBundle = validateNamespaceBundleOwnership(namespaceName, policies.bundles, - bundleRange, true, true); - } catch (WebApplicationException wae) { - asyncResponse.resume(wae); - return; - } - try { - ConcurrentOpenHashMap> bundleTopics = - pulsar().getBrokerService().getMultiLayerTopicsMap().get(namespaceName.toString()); - if (bundleTopics == null || bundleTopics.isEmpty()) { - asyncResponse.resume(Collections.emptyList()); - return; - } - final List topicList = new ArrayList<>(); - String bundleKey = namespaceName.toString() + "/" + nsBundle.getBundleRange(); - ConcurrentOpenHashMap topicMap = bundleTopics.get(bundleKey); - if (topicMap != null) { - topicList.addAll(topicMap.keys().stream() - .filter(name -> !TopicName.get(name).isPersistent()) - .collect(Collectors.toList())); - } - asyncResponse.resume(topicList); - } catch (Exception e) { - log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), - namespaceName, bundleRange, e); - asyncResponse.resume(new RestException(e)); - } + validateNamespaceBundleOwnershipAsync(namespaceName, policies.bundles, bundleRange, true, true) + .thenAccept(nsBundle -> { + ConcurrentOpenHashMap> bundleTopics = + pulsar().getBrokerService() + .getMultiLayerTopicsMap().get(namespaceName.toString()); + if (bundleTopics == null || bundleTopics.isEmpty()) { + asyncResponse.resume(Collections.emptyList()); + return; + } + final List topicList = new ArrayList<>(); + String bundleKey = namespaceName.toString() + "/" + nsBundle.getBundleRange(); + ConcurrentOpenHashMap topicMap = bundleTopics.get(bundleKey); + if (topicMap != null) { + topicList.addAll(topicMap.keys().stream() + .filter(name -> !TopicName.get(name).isPersistent()) + .collect(Collectors.toList())); + } + asyncResponse.resume(topicList); + }).exceptionally(ex -> { + if (!isRedirectException(ex)) { + log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), + namespaceName, bundleRange, ex); + } + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); } }).exceptionally(ex -> { - log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), - namespaceName, bundleRange, ex); - if (ex.getCause() instanceof WebApplicationException) { - asyncResponse.resume(ex.getCause()); - } else { - asyncResponse.resume(new RestException(ex.getCause())); + if (!isRedirectException(ex)) { + log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), + namespaceName, bundleRange, ex); } + resumeAsyncResponseExceptionally(asyncResponse, ex); return null; }); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java index eee363aeed86b..15d23d6e4d85f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java @@ -2406,7 +2406,8 @@ public void getRetention(@Suspended final AsyncResponse asyncResponse, @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.READ) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalGetRetention(applied, isGlobal)) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { @@ -2433,7 +2434,8 @@ public void setRetention(@Suspended final AsyncResponse asyncResponse, @QueryParam("isGlobal") @DefaultValue("false") boolean isGlobal, @ApiParam(value = "Retention policies for the specified topic") RetentionPolicies retention) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.WRITE) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalSetRetention(retention, isGlobal)) .thenRun(() -> { try { @@ -2469,7 +2471,8 @@ public void removeRetention(@Suspended final AsyncResponse asyncResponse, @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.WRITE) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalRemoveRetention(isGlobal)) .thenRun(() -> { log.info("[{}] Successfully remove retention: namespace={}, topic={}", diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java index f0feb8b27d6a1..33076fd51a8e9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java @@ -21,13 +21,20 @@ import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; import io.netty.util.concurrent.DefaultThreadFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.mledger.ManagedCursor; +import org.apache.commons.collections4.MapUtils; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.delayed.bucket.BookkeeperBucketSnapshotStorage; import org.apache.pulsar.broker.delayed.bucket.BucketDelayedDeliveryTracker; import org.apache.pulsar.broker.delayed.bucket.BucketSnapshotStorage; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; +import org.apache.pulsar.common.util.FutureUtil; public class BucketDelayedDeliveryTrackerFactory implements DelayedDeliveryTrackerFactory { @@ -72,6 +79,31 @@ public DelayedDeliveryTracker newTracker(PersistentDispatcherMultipleConsumers d delayedDeliveryMaxIndexesPerBucketSnapshotSegment, delayedDeliveryMaxNumBuckets); } + /** + * Clean up residual snapshot data. + * If tracker has not been created or has been closed, then we can't clean up the snapshot with `tracker.clear`, + * this method can clean up the residual snapshots without creating a tracker. 
+ */ + public CompletableFuture cleanResidualSnapshots(ManagedCursor cursor) { + Map cursorProperties = cursor.getCursorProperties(); + if (MapUtils.isEmpty(cursorProperties)) { + return CompletableFuture.completedFuture(null); + } + List> futures = new ArrayList<>(); + FutureUtil.Sequencer sequencer = FutureUtil.Sequencer.create(); + cursorProperties.forEach((k, v) -> { + if (k != null && v != null && k.startsWith(BucketDelayedDeliveryTracker.DELAYED_BUCKET_KEY_PREFIX)) { + CompletableFuture future = sequencer.sequential(() -> { + return cursor.removeCursorProperty(k) + .thenCompose(__ -> bucketSnapshotStorage.deleteBucketSnapshot(Long.parseLong(v))); + }); + futures.add(future); + } + }); + + return FutureUtil.waitForAll(futures); + } + @Override public void close() throws Exception { if (bucketSnapshotStorage != null) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BookkeeperBucketSnapshotStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BookkeeperBucketSnapshotStorage.java index e7d4f9301dd36..e99f39b382f56 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BookkeeperBucketSnapshotStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BookkeeperBucketSnapshotStorage.java @@ -19,13 +19,15 @@ package org.apache.pulsar.broker.delayed.bucket; import com.google.protobuf.InvalidProtocolBufferException; -import java.io.IOException; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import javax.validation.constraints.NotNull; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; @@ -35,8 +37,9 @@ import org.apache.bookkeeper.mledger.impl.LedgerMetadataUtils; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; +import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.util.FutureUtil; @Slf4j @@ -48,6 +51,8 @@ public class BookkeeperBucketSnapshotStorage implements BucketSnapshotStorage { private final ServiceConfiguration config; private BookKeeper bookKeeper; + private final Map> ledgerHandleFutureCache = new ConcurrentHashMap<>(); + public BookkeeperBucketSnapshotStorage(PulsarService pulsar) { this.pulsar = pulsar; this.config = pulsar.getConfig(); @@ -57,8 +62,9 @@ public BookkeeperBucketSnapshotStorage(PulsarService pulsar) { public CompletableFuture createBucketSnapshot(SnapshotMetadata snapshotMetadata, List bucketSnapshotSegments, String bucketKey, String topicName, String cursorName) { + ByteBuf metadataByteBuf = Unpooled.wrappedBuffer(snapshotMetadata.toByteArray()); return createLedger(bucketKey, topicName, cursorName) - .thenCompose(ledgerHandle -> addEntry(ledgerHandle, snapshotMetadata.toByteArray()) + .thenCompose(ledgerHandle -> addEntry(ledgerHandle, metadataByteBuf) .thenCompose(__ -> addSnapshotSegments(ledgerHandle, bucketSnapshotSegments)) .thenCompose(__ -> 
closeLedger(ledgerHandle)) .thenApply(__ -> ledgerHandle.getId())); @@ -66,26 +72,30 @@ public CompletableFuture createBucketSnapshot(SnapshotMetadata snapshotMet @Override public CompletableFuture getBucketSnapshotMetadata(long bucketId) { - return openLedger(bucketId).thenCompose( - ledgerHandle -> getLedgerEntry(ledgerHandle, 0, 0). - thenApply(entryEnumeration -> parseSnapshotMetadataEntry(entryEnumeration.nextElement()))); + return getLedgerHandle(bucketId).thenCompose(ledgerHandle -> getLedgerEntry(ledgerHandle, 0, 0) + .thenApply(entryEnumeration -> parseSnapshotMetadataEntry(entryEnumeration.nextElement()))); } @Override public CompletableFuture> getBucketSnapshotSegment(long bucketId, long firstSegmentEntryId, long lastSegmentEntryId) { - return openLedger(bucketId).thenCompose( - ledgerHandle -> getLedgerEntry(ledgerHandle, firstSegmentEntryId, - lastSegmentEntryId).thenApply(this::parseSnapshotSegmentEntries)); + return getLedgerHandle(bucketId).thenCompose( + ledgerHandle -> getLedgerEntry(ledgerHandle, firstSegmentEntryId, lastSegmentEntryId) + .thenApply(this::parseSnapshotSegmentEntries)); } @Override public CompletableFuture getBucketSnapshotLength(long bucketId) { - return openLedger(bucketId).thenApply(LedgerHandle::getLength); + return getLedgerHandle(bucketId).thenCompose( + ledgerHandle -> CompletableFuture.completedFuture(ledgerHandle.getLength())); } @Override public CompletableFuture deleteBucketSnapshot(long bucketId) { + CompletableFuture ledgerHandleFuture = ledgerHandleFutureCache.remove(bucketId); + if (ledgerHandleFuture != null) { + ledgerHandleFuture.whenComplete((lh, ex) -> closeLedger(lh)); + } return deleteLedger(bucketId); } @@ -110,32 +120,49 @@ public void close() throws Exception { private CompletableFuture addSnapshotSegments(LedgerHandle ledgerHandle, List bucketSnapshotSegments) { List> addFutures = new ArrayList<>(); + ByteBuf byteBuf; for (SnapshotSegment bucketSnapshotSegment : bucketSnapshotSegments) { - addFutures.add(addEntry(ledgerHandle, bucketSnapshotSegment.toByteArray())); + byteBuf = PulsarByteBufAllocator.DEFAULT.directBuffer(bucketSnapshotSegment.getSerializedSize()); + try { + bucketSnapshotSegment.writeTo(byteBuf); + } catch (Exception e){ + byteBuf.release(); + throw e; + } + addFutures.add(addEntry(ledgerHandle, byteBuf)); } return FutureUtil.waitForAll(addFutures); } private SnapshotMetadata parseSnapshotMetadataEntry(LedgerEntry ledgerEntry) { + ByteBuf entryBuffer = null; try { - return SnapshotMetadata.parseFrom(ledgerEntry.getEntry()); + entryBuffer = ledgerEntry.getEntryBuffer(); + return SnapshotMetadata.parseFrom(entryBuffer.nioBuffer()); } catch (InvalidProtocolBufferException e) { throw new BucketSnapshotSerializationException(e); + } finally { + if (entryBuffer != null) { + entryBuffer.release(); + } } } private List parseSnapshotSegmentEntries(Enumeration entryEnumeration) { List snapshotMetadataList = new ArrayList<>(); - try { - while (entryEnumeration.hasMoreElements()) { - LedgerEntry ledgerEntry = entryEnumeration.nextElement(); - snapshotMetadataList.add(SnapshotSegment.parseFrom(ledgerEntry.getEntry())); + while (entryEnumeration.hasMoreElements()) { + LedgerEntry ledgerEntry = entryEnumeration.nextElement(); + SnapshotSegment snapshotSegment = new SnapshotSegment(); + ByteBuf entryBuffer = ledgerEntry.getEntryBuffer(); + try { + snapshotSegment.parseFrom(entryBuffer, entryBuffer.readableBytes()); + } finally { + entryBuffer.release(); } - return snapshotMetadataList; - } catch (IOException e) { - throw new 
BucketSnapshotSerializationException(e); + snapshotMetadataList.add(snapshotSegment); } + return snapshotMetadataList; } @NotNull @@ -159,6 +186,18 @@ private CompletableFuture createLedger(String bucketKey, String to return future; } + private CompletableFuture getLedgerHandle(Long ledgerId) { + CompletableFuture ledgerHandleCompletableFuture = + ledgerHandleFutureCache.computeIfAbsent(ledgerId, k -> openLedger(ledgerId)); + // remove future of completed exceptionally + ledgerHandleCompletableFuture.whenComplete((__, ex) -> { + if (ex != null) { + ledgerHandleFutureCache.remove(ledgerId, ledgerHandleCompletableFuture); + } + }); + return ledgerHandleCompletableFuture; + } + private CompletableFuture openLedger(Long ledgerId) { final CompletableFuture future = new CompletableFuture<>(); bookKeeper.asyncOpenLedger( @@ -189,7 +228,7 @@ private CompletableFuture closeLedger(LedgerHandle ledgerHandle) { return future; } - private CompletableFuture addEntry(LedgerHandle ledgerHandle, byte[] data) { + private CompletableFuture addEntry(LedgerHandle ledgerHandle, ByteBuf data) { final CompletableFuture future = new CompletableFuture<>(); ledgerHandle.asyncAddEntry(data, (rc, handle, entryId, ctx) -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/Bucket.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/Bucket.java index 5b7023be5034e..a1693b1553d97 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/Bucket.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/Bucket.java @@ -18,8 +18,8 @@ */ package org.apache.pulsar.broker.delayed.bucket; -import static org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.CURSOR_INTERNAL_PROPERTY_PREFIX; import static org.apache.bookkeeper.mledger.util.Futures.executeWithRetry; +import static org.apache.pulsar.broker.delayed.bucket.BucketDelayedDeliveryTracker.DELAYED_BUCKET_KEY_PREFIX; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -31,7 +31,8 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.FutureUtil; import org.roaringbitmap.RoaringBitmap; @@ -41,7 +42,6 @@ @AllArgsConstructor abstract class Bucket { - static final String DELAYED_BUCKET_KEY_PREFIX = CURSOR_INTERNAL_PROPERTY_PREFIX + "delayed.bucket"; static final String DELIMITER = "_"; static final int MaxRetryTimes = 3; @@ -133,8 +133,8 @@ long getAndUpdateBucketId() { } CompletableFuture asyncSaveBucketSnapshot( - ImmutableBucket bucket, DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata, - List bucketSnapshotSegments) { + ImmutableBucket bucket, SnapshotMetadata snapshotMetadata, + List bucketSnapshotSegments) { final String bucketKey = bucket.bucketKey(); final String cursorName = Codec.decode(cursor.getName()); final String topicName = dispatcherName.substring(0, dispatcherName.lastIndexOf(" / " + cursorName)); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java index 
a34bd51af98e4..f98c9e000f150 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java @@ -19,9 +19,8 @@ package org.apache.pulsar.broker.delayed.bucket; import static com.google.common.base.Preconditions.checkArgument; -import static org.apache.pulsar.broker.delayed.bucket.Bucket.DELAYED_BUCKET_KEY_PREFIX; +import static org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.CURSOR_INTERNAL_PROPERTY_PREFIX; import static org.apache.pulsar.broker.delayed.bucket.Bucket.DELIMITER; -import static org.apache.pulsar.broker.delayed.bucket.Bucket.MaxRetryTimes; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.HashBasedTable; import com.google.common.collect.Range; @@ -51,11 +50,12 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.mutable.MutableLong; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.delayed.AbstractDelayedDeliveryTracker; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; import org.apache.pulsar.common.policies.data.stats.TopicMetricBean; import org.apache.pulsar.common.util.FutureUtil; @@ -66,6 +66,8 @@ @ThreadSafe public class BucketDelayedDeliveryTracker extends AbstractDelayedDeliveryTracker { + public static final String DELAYED_BUCKET_KEY_PREFIX = CURSOR_INTERNAL_PROPERTY_PREFIX + "delayed.bucket"; + static final CompletableFuture NULL_LONG_PROMISE = CompletableFuture.completedFuture(null); static final int AsyncOperationTimeoutSeconds = 60; @@ -82,7 +84,7 @@ public class BucketDelayedDeliveryTracker extends AbstractDelayedDeliveryTracker private final int maxNumBuckets; - private long numberDelayedMessages; + private volatile long numberDelayedMessages; @Getter @VisibleForTesting @@ -100,6 +102,8 @@ public class BucketDelayedDeliveryTracker extends AbstractDelayedDeliveryTracker private final BucketDelayedMessageIndexStats stats; + private CompletableFuture pendingLoad = null; + public BucketDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, long tickTimeMillis, boolean isDelayedDeliveryDeliverAtTimeStrict, @@ -134,9 +138,15 @@ public BucketDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispat private synchronized long recoverBucketSnapshot() throws RuntimeException { ManagedCursor cursor = this.lastMutableBucket.getCursor(); + Map cursorProperties = cursor.getCursorProperties(); + if (MapUtils.isEmpty(cursorProperties)) { + log.info("[{}] Recover delayed message index bucket snapshot finish, don't find bucket snapshot", + dispatcher.getName()); + return 0; + } FutureUtil.Sequencer sequencer = this.lastMutableBucket.getSequencer(); Map, ImmutableBucket> toBeDeletedBucketMap = new HashMap<>(); - cursor.getCursorProperties().keySet().forEach(key -> { + cursorProperties.keySet().forEach(key -> { if (key.startsWith(DELAYED_BUCKET_KEY_PREFIX)) { String[] keys = 
key.split(DELIMITER); checkArgument(keys.length == 3); @@ -165,7 +175,7 @@ private synchronized long recoverBucketSnapshot() throws RuntimeException { } try { - FutureUtil.waitForAll(futures.values()).get(AsyncOperationTimeoutSeconds * 2, TimeUnit.SECONDS); + FutureUtil.waitForAll(futures.values()).get(AsyncOperationTimeoutSeconds * 5, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { log.error("[{}] Failed to recover delayed message index bucket snapshot.", dispatcher.getName(), e); if (e instanceof InterruptedException) { @@ -267,7 +277,7 @@ private void afterCreateImmutableBucket(Pair immu if (ex == null) { immutableBucket.setSnapshotSegments(null); immutableBucket.asyncUpdateSnapshotLength(); - log.info("[{}] Creat bucket snapshot finish, bucketKey: {}", dispatcher.getName(), + log.info("[{}] Create bucket snapshot finish, bucketKey: {}", dispatcher.getName(), immutableBucket.bucketKey()); stats.recordSuccessEvent(BucketDelayedMessageIndexStats.Type.create, @@ -283,8 +293,7 @@ private void afterCreateImmutableBucket(Pair immu // Put indexes back into the shared queue and downgrade to memory mode synchronized (BucketDelayedDeliveryTracker.this) { immutableBucket.getSnapshotSegments().ifPresent(snapshotSegments -> { - for (DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment snapshotSegment : - snapshotSegments) { + for (SnapshotSegment snapshotSegment : snapshotSegments) { for (DelayedIndex delayedIndex : snapshotSegment.getIndexesList()) { sharedBucketPriorityQueue.add(delayedIndex.getTimestamp(), delayedIndex.getLedgerId(), delayedIndex.getEntryId()); @@ -332,7 +341,7 @@ public synchronized boolean addMessage(long ledgerId, long entryId, long deliver afterCreateImmutableBucket(immutableBucketDelayedIndexPair, createStartTime); lastMutableBucket.resetLastMutableBucketRange(); - if (immutableBuckets.asMapOfRanges().size() > maxNumBuckets) { + if (maxNumBuckets > 0 && immutableBuckets.asMapOfRanges().size() > maxNumBuckets) { asyncMergeBucketSnapshot(); } } @@ -341,6 +350,7 @@ public synchronized boolean addMessage(long ledgerId, long entryId, long deliver // If (ledgerId < startLedgerId || existBucket) means that message index belong to previous bucket range, // enter sharedBucketPriorityQueue directly sharedBucketPriorityQueue.add(deliverAt, ledgerId, entryId); + lastMutableBucket.putIndexBit(ledgerId, entryId); } else { checkArgument(ledgerId >= lastMutableBucket.endLedgerId); lastMutableBucket.addMessage(ledgerId, entryId, deliverAt); @@ -447,7 +457,7 @@ private synchronized CompletableFuture asyncMergeBucketSnapshot(List>> getRemainFutures = + List>> getRemainFutures = buckets.stream().map(ImmutableBucket::getRemainSnapshotSegment).toList(); return FutureUtil.waitForAll(getRemainFutures) @@ -482,6 +492,9 @@ private synchronized CompletableFuture asyncMergeBucketSnapshot(List getScheduledMessages(int maxMessages) { + if (!checkPendingLoadDone()) { + if (log.isDebugEnabled()) { + log.debug("[{}] Skip getScheduledMessages to wait for bucket snapshot load finish.", + dispatcher.getName()); + } + return Collections.emptyNavigableSet(); + } + long cutoffTime = getCutoffTime(); lastMutableBucket.moveScheduledMessageToSharedQueue(cutoffTime, sharedBucketPriorityQueue); @@ -556,6 +577,7 @@ public synchronized NavigableSet getScheduledMessages(int maxMessa ImmutableBucket bucket = snapshotSegmentLastIndexTable.get(ledgerId, entryId); if (bucket != null && immutableBuckets.asMapOfRanges().containsValue(bucket)) { + // All message of current 
snapshot segment are scheduled, try load next snapshot segment if (bucket.merging) { log.info("[{}] Skip load to wait for bucket snapshot merge finish, bucketKey:{}", dispatcher.getName(), bucket.bucketKey()); @@ -567,65 +589,63 @@ public synchronized NavigableSet getScheduledMessages(int maxMessa log.debug("[{}] Loading next bucket snapshot segment, bucketKey: {}, nextSegmentEntryId: {}", dispatcher.getName(), bucket.bucketKey(), preSegmentEntryId + 1); } - // All message of current snapshot segment are scheduled, load next snapshot segment - // TODO make it asynchronous and not blocking this process - try { - boolean createFutureDone = bucket.getSnapshotCreateFuture().orElse(NULL_LONG_PROMISE).isDone(); - - if (!createFutureDone) { - log.info("[{}] Skip load to wait for bucket snapshot create finish, bucketKey:{}", - dispatcher.getName(), bucket.bucketKey()); - break; - } - - if (bucket.currentSegmentEntryId == bucket.lastSegmentEntryId) { - immutableBuckets.asMapOfRanges().remove(Range.closed(bucket.startLedgerId, bucket.endLedgerId)); - bucket.asyncDeleteBucketSnapshot(stats); - continue; - } + boolean createFutureDone = bucket.getSnapshotCreateFuture().orElse(NULL_LONG_PROMISE).isDone(); + if (!createFutureDone) { + log.info("[{}] Skip load to wait for bucket snapshot create finish, bucketKey:{}", + dispatcher.getName(), bucket.bucketKey()); + break; + } - long loadStartTime = System.currentTimeMillis(); - stats.recordTriggerEvent(BucketDelayedMessageIndexStats.Type.load); - bucket.asyncLoadNextBucketSnapshotEntry().thenAccept(indexList -> { + long loadStartTime = System.currentTimeMillis(); + stats.recordTriggerEvent(BucketDelayedMessageIndexStats.Type.load); + CompletableFuture loadFuture = pendingLoad = bucket.asyncLoadNextBucketSnapshotEntry() + .thenAccept(indexList -> { + synchronized (BucketDelayedDeliveryTracker.this) { + this.snapshotSegmentLastIndexTable.remove(ledgerId, entryId); if (CollectionUtils.isEmpty(indexList)) { immutableBuckets.asMapOfRanges() .remove(Range.closed(bucket.startLedgerId, bucket.endLedgerId)); bucket.asyncDeleteBucketSnapshot(stats); return; } - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex + DelayedIndex lastDelayedIndex = indexList.get(indexList.size() - 1); this.snapshotSegmentLastIndexTable.put(lastDelayedIndex.getLedgerId(), lastDelayedIndex.getEntryId(), bucket); - for (DelayedMessageIndexBucketSnapshotFormat.DelayedIndex index : indexList) { + for (DelayedIndex index : indexList) { sharedBucketPriorityQueue.add(index.getTimestamp(), index.getLedgerId(), index.getEntryId()); } - }).whenComplete((__, ex) -> { - if (ex != null) { - // Back bucket state - bucket.setCurrentSegmentEntryId(preSegmentEntryId); - - log.error("[{}] Failed to load bucket snapshot segment, bucketKey: {}, segmentEntryId: {}", - dispatcher.getName(), bucket.bucketKey(), preSegmentEntryId + 1, ex); - - stats.recordFailEvent(BucketDelayedMessageIndexStats.Type.load); - } else { - log.info("[{}] Load next bucket snapshot segment finish, bucketKey: {}, segmentEntryId: {}", - dispatcher.getName(), bucket.bucketKey(), preSegmentEntryId + 1); - - stats.recordSuccessEvent(BucketDelayedMessageIndexStats.Type.load, - System.currentTimeMillis() - loadStartTime); + } + }).whenComplete((__, ex) -> { + if (ex != null) { + // Back bucket state + bucket.setCurrentSegmentEntryId(preSegmentEntryId); + + log.error("[{}] Failed to load bucket snapshot segment, bucketKey: {}, segmentEntryId: {}", + dispatcher.getName(), bucket.bucketKey(), preSegmentEntryId + 1, ex); + + 
stats.recordFailEvent(BucketDelayedMessageIndexStats.Type.load); + } else { + log.info("[{}] Load next bucket snapshot segment finish, bucketKey: {}, segmentEntryId: {}", + dispatcher.getName(), bucket.bucketKey(), + (preSegmentEntryId == bucket.lastSegmentEntryId) ? "-1" : preSegmentEntryId + 1); + + stats.recordSuccessEvent(BucketDelayedMessageIndexStats.Type.load, + System.currentTimeMillis() - loadStartTime); + } + synchronized (this) { + if (timeout != null) { + timeout.cancel(); } - }).get(AsyncOperationTimeoutSeconds * (MaxRetryTimes + 1), TimeUnit.SECONDS); - } catch (Exception e) { - // Ignore exception to reload this segment on the next schedule. - log.error("[{}] An exception occurs when load next bucket snapshot, bucketKey:{}", - dispatcher.getName(), bucket.bucketKey(), e); + timeout = timer.newTimeout(this, 0, TimeUnit.MILLISECONDS); + } + }); + + if (!checkPendingLoadDone() || loadFuture.isCompletedExceptionally()) { break; } } - snapshotSegmentLastIndexTable.remove(ledgerId, entryId); positions.add(new PositionImpl(ledgerId, entryId)); @@ -641,6 +661,14 @@ public synchronized NavigableSet getScheduledMessages(int maxMessa return positions; } + private synchronized boolean checkPendingLoadDone() { + if (pendingLoad == null || pendingLoad.isDone()) { + pendingLoad = null; + return true; + } + return false; + } + @Override public boolean shouldPauseAllDeliveries() { return false; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketSnapshotStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketSnapshotStorage.java index 51c89bed47af2..7464ef9cd3f63 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketSnapshotStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketSnapshotStorage.java @@ -20,8 +20,8 @@ import java.util.List; import java.util.concurrent.CompletableFuture; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; public interface BucketSnapshotStorage { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/CombinedSegmentDelayedIndexQueue.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/CombinedSegmentDelayedIndexQueue.java index 5655a26878296..006938e9ed271 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/CombinedSegmentDelayedIndexQueue.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/CombinedSegmentDelayedIndexQueue.java @@ -24,8 +24,8 @@ import java.util.PriorityQueue; import javax.annotation.concurrent.NotThreadSafe; import lombok.AllArgsConstructor; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.DelayedIndex; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; @NotThreadSafe class CombinedSegmentDelayedIndexQueue implements DelayedIndexQueue { @@ -40,8 +40,8 @@ static class Node { } private static final Comparator COMPARATOR_NODE = (node1, node2) -> DelayedIndexQueue.COMPARATOR.compare( - 
node1.segmentList.get(node1.segmentListCursor).getIndexes(node1.segmentCursor), - node2.segmentList.get(node2.segmentListCursor).getIndexes(node2.segmentCursor)); + node1.segmentList.get(node1.segmentListCursor).getIndexeAt(node1.segmentCursor), + node2.segmentList.get(node2.segmentListCursor).getIndexeAt(node2.segmentCursor)); private final PriorityQueue kpq; @@ -77,7 +77,7 @@ private DelayedIndex getValue(boolean needAdvanceCursor) { Objects.requireNonNull(node); SnapshotSegment snapshotSegment = node.segmentList.get(node.segmentListCursor); - DelayedIndex delayedIndex = snapshotSegment.getIndexes(node.segmentCursor); + DelayedIndex delayedIndex = snapshotSegment.getIndexeAt(node.segmentCursor); if (!needAdvanceCursor) { return delayedIndex; } @@ -104,4 +104,16 @@ private DelayedIndex getValue(boolean needAdvanceCursor) { return delayedIndex; } + + @Override + public void popToObject(DelayedIndex delayedIndex) { + DelayedIndex value = getValue(true); + delayedIndex.copyFrom(value); + } + + @Override + public long peekTimestamp() { + DelayedIndex value = getValue(false); + return value.getTimestamp(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueue.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueue.java index dee476c376ec9..f1209a3137a45 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueue.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueue.java @@ -20,10 +20,10 @@ import java.util.Comparator; import java.util.Objects; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; interface DelayedIndexQueue { - Comparator COMPARATOR = (o1, o2) -> { + Comparator COMPARATOR = (o1, o2) -> { if (!Objects.equals(o1.getTimestamp(), o2.getTimestamp())) { return Long.compare(o1.getTimestamp(), o2.getTimestamp()); } else if (!Objects.equals(o1.getLedgerId(), o2.getLedgerId())) { @@ -35,7 +35,11 @@ interface DelayedIndexQueue { boolean isEmpty(); - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex peek(); + DelayedIndex peek(); - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex pop(); + DelayedIndex pop(); + + void popToObject(DelayedIndex delayedIndex); + + long peekTimestamp(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/ImmutableBucket.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/ImmutableBucket.java index 82e98cefa5d98..0932f51f350ce 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/ImmutableBucket.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/ImmutableBucket.java @@ -20,11 +20,10 @@ import static org.apache.bookkeeper.mledger.util.Futures.executeWithRetry; import static org.apache.pulsar.broker.delayed.bucket.BucketDelayedDeliveryTracker.NULL_LONG_PROMISE; -import com.google.protobuf.ByteString; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; @@ -33,18 +32,18 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.mutable.MutableLong; -import 
org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.DelayedIndex; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegmentMetadata; import org.apache.pulsar.common.util.FutureUtil; +import org.roaringbitmap.InvalidRoaringFormat; import org.roaringbitmap.RoaringBitmap; -import org.roaringbitmap.buffer.ImmutableRoaringBitmap; @Slf4j class ImmutableBucket extends Bucket { @Setter - private List snapshotSegments; + private List snapshotSegments; boolean merging = false; @@ -56,7 +55,7 @@ class ImmutableBucket extends Bucket { super(dispatcherName, cursor, sequencer, storage, startLedgerId, endLedgerId); } - public Optional> getSnapshotSegments() { + public Optional> getSnapshotSegments() { return Optional.ofNullable(snapshotSegments); } @@ -85,7 +84,7 @@ private CompletableFuture> asyncLoadNextBucketSnapshotEntry(b } }), BucketSnapshotPersistenceException.class, MaxRetryTimes) .thenApply(snapshotMetadata -> { - List metadataList = + List metadataList = snapshotMetadata.getMetadataListList(); // Skip all already reach schedule time snapshot segments @@ -98,7 +97,7 @@ private CompletableFuture> asyncLoadNextBucketSnapshotEntry(b this.setLastSegmentEntryId(metadataList.size()); this.recoverDelayedIndexBitMapAndNumber(nextSnapshotEntryIndex, metadataList); List firstScheduleTimestamps = metadataList.stream().map( - SnapshotSegmentMetadata::getMinScheduleTimestamp).toList(); + SnapshotSegmentMetadata::getMinScheduleTimestamp).toList(); this.setFirstScheduleTimestamps(firstScheduleTimestamps); return nextSnapshotEntryIndex + 1; @@ -126,10 +125,9 @@ private CompletableFuture> asyncLoadNextBucketSnapshotEntry(b return Collections.emptyList(); } - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment snapshotSegment = + SnapshotSegment snapshotSegment = bucketSnapshotSegments.get(0); - List indexList = - snapshotSegment.getIndexesList(); + List indexList = snapshotSegment.getIndexesList(); this.setCurrentSegmentEntryId(nextSegmentEntryId); if (isRecover) { this.asyncUpdateSnapshotLength(); @@ -139,28 +137,40 @@ private CompletableFuture> asyncLoadNextBucketSnapshotEntry(b }); } + /** + * Recover delayed index bit map and message numbers. 
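
As context for the bitmap recovery below: the patch deserializes each stored segment bitmap directly into a mutable RoaringBitmap, OR-merges it per ledger id and run-optimizes the result, instead of going through ImmutableRoaringBitmap.toRoaringBitmap(). A minimal standalone sketch of that pattern, with hypothetical class and method names and an in-memory round trip standing in for the ledger entries:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Map;
    import org.roaringbitmap.RoaringBitmap;

    public class BitMapRecoverySketch {
        // Merge serialized per-ledger bitmaps into a mutable map, returning the total entry count.
        static long recover(Map<Long, ByteBuffer> serialized, Map<Long, RoaringBitmap> target)
                throws IOException {
            long numberMessages = 0;
            for (Map.Entry<Long, ByteBuffer> e : serialized.entrySet()) {
                RoaringBitmap bm = new RoaringBitmap();
                bm.deserialize(e.getValue());                 // read the portable serialization format
                numberMessages += bm.getCardinality();
                target.merge(e.getKey(), bm, (a, b) -> { a.or(b); return a; });
            }
            target.values().forEach(RoaringBitmap::runOptimize); // compress runs after merging
            return numberMessages;
        }

        public static void main(String[] args) throws IOException {
            RoaringBitmap source = RoaringBitmap.bitmapOf(1, 2, 3, 100);
            ByteBuffer buf = ByteBuffer.allocate(source.serializedSizeInBytes());
            source.serialize(buf);
            buf.flip();
            Map<Long, RoaringBitmap> recovered = new HashMap<>();
            System.out.println(recover(Map.of(7L, buf), recovered)); // prints 4
        }
    }
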
+ * @throws InvalidRoaringFormat invalid bitmap serialization format + */ private void recoverDelayedIndexBitMapAndNumber(int startSnapshotIndex, - List segmentMetadata) { - this.delayedIndexBitMap.clear(); - MutableLong numberMessages = new MutableLong(0); - for (int i = startSnapshotIndex; i < segmentMetadata.size(); i++) { - Map bitByteStringMap = segmentMetadata.get(i).getDelayedIndexBitMapMap(); - bitByteStringMap.forEach((leaderId, bitSetString) -> { - boolean exist = this.delayedIndexBitMap.containsKey(leaderId); - RoaringBitmap bitSet = - new ImmutableRoaringBitmap(bitSetString.asReadOnlyByteBuffer()).toRoaringBitmap(); - numberMessages.add(bitSet.getCardinality()); - if (!exist) { - this.delayedIndexBitMap.put(leaderId, bitSet); - } else { - this.delayedIndexBitMap.get(leaderId).or(bitSet); + List segmentMetaList) { + delayedIndexBitMap.clear(); // cleanup dirty bm + final var numberMessages = new MutableLong(0); + for (int i = startSnapshotIndex; i < segmentMetaList.size(); i++) { + for (final var entry : segmentMetaList.get(i).getDelayedIndexBitMapMap().entrySet()) { + final var ledgerId = entry.getKey(); + final var bs = entry.getValue(); + final var sbm = new RoaringBitmap(); + try { + sbm.deserialize(bs.asReadOnlyByteBuffer()); + } catch (IOException e) { + throw new InvalidRoaringFormat(e.getMessage()); } - }); + numberMessages.add(sbm.getCardinality()); + delayedIndexBitMap.compute(ledgerId, (lId, bm) -> { + if (bm == null) { + return sbm; + } + bm.or(sbm); + return bm; + }); + } } - this.setNumberBucketDelayedMessages(numberMessages.getValue()); + // optimize bm + delayedIndexBitMap.values().forEach(RoaringBitmap::runOptimize); + setNumberBucketDelayedMessages(numberMessages.getValue()); } - CompletableFuture> getRemainSnapshotSegment() { + CompletableFuture> getRemainSnapshotSegment() { int nextSegmentEntryId = currentSegmentEntryId + 1; if (nextSegmentEntryId > lastSegmentEntryId) { return CompletableFuture.completedFuture(Collections.emptyList()); @@ -193,7 +203,7 @@ CompletableFuture asyncDeleteBucketSnapshot(BucketDelayedMessageIndexStats stats.recordFailEvent(BucketDelayedMessageIndexStats.Type.delete); } else { log.info("[{}] Delete bucket snapshot finish, bucketId: {}, bucketKey: {}", - dispatcherName, bucketId, bucketKey); + dispatcherName, bucketId, bucketKey); stats.recordSuccessEvent(BucketDelayedMessageIndexStats.Type.delete, System.currentTimeMillis() - deleteStartTime); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/MutableBucket.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/MutableBucket.java index e49ebe9606e01..1173a401a8903 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/MutableBucket.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/MutableBucket.java @@ -19,7 +19,7 @@ package org.apache.pulsar.broker.delayed.bucket; import static com.google.common.base.Preconditions.checkArgument; -import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -30,10 +30,10 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.commons.lang3.tuple.Pair; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.DelayedIndex; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata; -import 
org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegmentMetadata; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.TripleLongPriorityQueue; import org.roaringbitmap.RoaringBitmap; @@ -74,24 +74,30 @@ Pair createImmutableBucketAndAsyncPersistent( List bucketSnapshotSegments = new ArrayList<>(); List segmentMetadataList = new ArrayList<>(); + Map immutableBucketBitMap = new HashMap<>(); + Map bitMap = new HashMap<>(); - SnapshotSegment.Builder snapshotSegmentBuilder = SnapshotSegment.newBuilder(); + SnapshotSegment snapshotSegment = new SnapshotSegment(); SnapshotSegmentMetadata.Builder segmentMetadataBuilder = SnapshotSegmentMetadata.newBuilder(); List firstScheduleTimestamps = new ArrayList<>(); long currentTimestampUpperLimit = 0; long currentFirstTimestamp = 0L; while (!delayedIndexQueue.isEmpty()) { - DelayedIndex delayedIndex = delayedIndexQueue.peek(); - long timestamp = delayedIndex.getTimestamp(); + final long timestamp = delayedIndexQueue.peekTimestamp(); if (currentTimestampUpperLimit == 0) { currentFirstTimestamp = timestamp; firstScheduleTimestamps.add(currentFirstTimestamp); currentTimestampUpperLimit = timestamp + timeStepPerBucketSnapshotSegment - 1; } - long ledgerId = delayedIndex.getLedgerId(); - long entryId = delayedIndex.getEntryId(); + DelayedIndex delayedIndex = snapshotSegment.addIndexe(); + delayedIndexQueue.popToObject(delayedIndex); + + final long ledgerId = delayedIndex.getLedgerId(); + final long entryId = delayedIndex.getEntryId(); + + removeIndexBit(ledgerId, entryId); checkArgument(ledgerId >= startLedgerId && ledgerId <= endLedgerId); @@ -100,37 +106,49 @@ Pair createImmutableBucketAndAsyncPersistent( sharedQueue.add(timestamp, ledgerId, entryId); } - delayedIndexQueue.pop(); - numMessages++; - bitMap.computeIfAbsent(ledgerId, k -> new RoaringBitmap()).add(entryId, entryId + 1); - snapshotSegmentBuilder.addIndexes(delayedIndex); + numMessages++; - if (delayedIndexQueue.isEmpty() || delayedIndexQueue.peek().getTimestamp() > currentTimestampUpperLimit + if (delayedIndexQueue.isEmpty() || delayedIndexQueue.peekTimestamp() > currentTimestampUpperLimit || (maxIndexesPerBucketSnapshotSegment != -1 - && snapshotSegmentBuilder.getIndexesCount() >= maxIndexesPerBucketSnapshotSegment)) { + && snapshotSegment.getIndexesCount() >= maxIndexesPerBucketSnapshotSegment)) { segmentMetadataBuilder.setMaxScheduleTimestamp(timestamp); segmentMetadataBuilder.setMinScheduleTimestamp(currentFirstTimestamp); currentTimestampUpperLimit = 0; Iterator> iterator = bitMap.entrySet().iterator(); while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - byte[] array = new byte[entry.getValue().serializedSizeInBytes()]; - entry.getValue().serialize(ByteBuffer.wrap(array)); - segmentMetadataBuilder.putDelayedIndexBitMap(entry.getKey(), ByteString.copyFrom(array)); + final var entry = iterator.next(); + final var lId = entry.getKey(); + final var bm = entry.getValue(); + bm.runOptimize(); + ByteBuffer byteBuffer = ByteBuffer.allocate(bm.serializedSizeInBytes()); + bm.serialize(byteBuffer); + byteBuffer.flip(); + 
segmentMetadataBuilder.putDelayedIndexBitMap(lId, UnsafeByteOperations.unsafeWrap(byteBuffer)); + immutableBucketBitMap.compute(lId, (__, bm0) -> { + if (bm0 == null) { + return bm; + } + bm0.or(bm); + return bm0; + }); iterator.remove(); } segmentMetadataList.add(segmentMetadataBuilder.build()); segmentMetadataBuilder.clear(); - bucketSnapshotSegments.add(snapshotSegmentBuilder.build()); - snapshotSegmentBuilder.clear(); + bucketSnapshotSegments.add(snapshotSegment); + snapshotSegment = new SnapshotSegment(); } } + // optimize bm + immutableBucketBitMap.values().forEach(RoaringBitmap::runOptimize); + this.delayedIndexBitMap.values().forEach(RoaringBitmap::runOptimize); + SnapshotMetadata bucketSnapshotMetadata = SnapshotMetadata.newBuilder() .addAllMetadataList(segmentMetadataList) .build(); @@ -143,6 +161,7 @@ Pair createImmutableBucketAndAsyncPersistent( bucket.setNumberBucketDelayedMessages(numMessages); bucket.setLastSegmentEntryId(lastSegmentEntryId); bucket.setFirstScheduleTimestamps(firstScheduleTimestamps); + bucket.setDelayedIndexBitMap(immutableBucketBitMap); // Skip first segment, because it has already been loaded List snapshotSegments = bucketSnapshotSegments.subList(1, bucketSnapshotSegments.size()); @@ -150,8 +169,8 @@ Pair createImmutableBucketAndAsyncPersistent( // Add the first snapshot segment last message to snapshotSegmentLastMessageTable checkArgument(!bucketSnapshotSegments.isEmpty()); - SnapshotSegment snapshotSegment = bucketSnapshotSegments.get(0); - DelayedIndex lastDelayedIndex = snapshotSegment.getIndexes(snapshotSegment.getIndexesCount() - 1); + SnapshotSegment firstSnapshotSegment = bucketSnapshotSegments.get(0); + DelayedIndex lastDelayedIndex = firstSnapshotSegment.getIndexeAt(firstSnapshotSegment.getIndexesCount() - 1); Pair result = Pair.of(bucket, lastDelayedIndex); CompletableFuture future = asyncSaveBucketSnapshot(bucket, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/TripleLongPriorityDelayedIndexQueue.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/TripleLongPriorityDelayedIndexQueue.java index b8d54bd78b428..4faee3b17f17f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/TripleLongPriorityDelayedIndexQueue.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/TripleLongPriorityDelayedIndexQueue.java @@ -19,7 +19,7 @@ package org.apache.pulsar.broker.delayed.bucket; import javax.annotation.concurrent.NotThreadSafe; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; import org.apache.pulsar.common.util.collections.TripleLongPriorityQueue; @NotThreadSafe @@ -41,17 +41,28 @@ public boolean isEmpty() { } @Override - public DelayedMessageIndexBucketSnapshotFormat.DelayedIndex peek() { - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex delayedIndex = - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex.newBuilder().setTimestamp(queue.peekN1()) - .setLedgerId(queue.peekN2()).setEntryId(queue.peekN3()).build(); + public DelayedIndex peek() { + DelayedIndex delayedIndex = new DelayedIndex().setTimestamp(queue.peekN1()) + .setLedgerId(queue.peekN2()).setEntryId(queue.peekN3()); return delayedIndex; } @Override - public DelayedMessageIndexBucketSnapshotFormat.DelayedIndex pop() { - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex peek = peek(); + public DelayedIndex pop() { + DelayedIndex peek = peek(); queue.pop(); return peek; } + + 
@Override + public void popToObject(DelayedIndex delayedIndex) { + delayedIndex.setTimestamp(queue.peekN1()) + .setLedgerId(queue.peekN2()).setEntryId(queue.peekN3()); + queue.pop(); + } + + @Override + public long peekTimestamp() { + return queue.peekN1(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java index cef3f0eb609a1..30d1874a97299 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java @@ -59,6 +59,9 @@ public BrokerInterceptors(Map intercep * @return the collection of broker event interceptor */ public static BrokerInterceptor load(ServiceConfiguration conf) throws IOException { + if (conf.getBrokerInterceptors().isEmpty()) { + return null; + } BrokerInterceptorDefinitions definitions = BrokerInterceptorUtils.searchForInterceptors(conf.getBrokerInterceptorsDirectory(), conf.getNarExtractionDirectory()); @@ -87,7 +90,7 @@ public static BrokerInterceptor load(ServiceConfiguration conf) throws IOExcepti }); Map interceptors = builder.build(); - if (interceptors != null && !interceptors.isEmpty()) { + if (!interceptors.isEmpty()) { return new BrokerInterceptors(interceptors); } else { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java index acd34e151ed2a..d7c21de5ea1fa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java @@ -30,5 +30,23 @@ @AllArgsConstructor @NoArgsConstructor public class LeaderBroker { + private String brokerId; private String serviceUrl; + + public String getBrokerId() { + if (brokerId != null) { + return brokerId; + } else { + // for backward compatibility at runtime with older versions of Pulsar + return parseHostAndPort(serviceUrl); + } + } + + private static String parseHostAndPort(String serviceUrl) { + int uriSeparatorPos = serviceUrl.indexOf("://"); + if (uriSeparatorPos == -1) { + throw new IllegalArgumentException("'" + serviceUrl + "' isn't an URI."); + } + return serviceUrl.substring(uriSeparatorPos + 3); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java index 05fe4353f3e76..2e53b54e98f61 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java @@ -35,17 +35,17 @@ public class LeaderElectionService implements AutoCloseable { private final LeaderElection leaderElection; private final LeaderBroker localValue; - public LeaderElectionService(CoordinationService cs, String localWebServiceAddress, - Consumer listener) { - this(cs, localWebServiceAddress, ELECTION_ROOT, listener); + public LeaderElectionService(CoordinationService cs, String brokerId, + String serviceUrl, Consumer listener) { + this(cs, brokerId, serviceUrl, ELECTION_ROOT, listener); } public LeaderElectionService(CoordinationService cs, - String localWebServiceAddress, - String electionRoot, + String brokerId, + String serviceUrl, String 
electionRoot, Consumer listener) { this.leaderElection = cs.getLeaderElection(LeaderBroker.class, electionRoot, listener); - this.localValue = new LeaderBroker(localWebServiceAddress); + this.localValue = new LeaderBroker(brokerId, serviceUrl); } public void start() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java index 42ef264b6db04..61f34ef4901ba 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.broker.loadbalance; +import com.google.common.annotations.VisibleForTesting; import java.io.IOException; +import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -45,6 +47,7 @@ public class LinuxInfoUtils { private static final String CGROUPS_CPU_USAGE_PATH = "/sys/fs/cgroup/cpu/cpuacct.usage"; private static final String CGROUPS_CPU_LIMIT_QUOTA_PATH = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"; private static final String CGROUPS_CPU_LIMIT_PERIOD_PATH = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"; + // proc states private static final String PROC_STAT_PATH = "/proc/stat"; private static final String NIC_PATH = "/sys/class/net/"; @@ -52,6 +55,30 @@ public class LinuxInfoUtils { private static final int ARPHRD_ETHER = 1; private static final String NIC_SPEED_TEMPLATE = "/sys/class/net/%s/speed"; + private static Object /*jdk.internal.platform.Metrics*/ metrics; + private static Method getMetricsProviderMethod; + private static Method getCpuQuotaMethod; + private static Method getCpuPeriodMethod; + private static Method getCpuUsageMethod; + + static { + try { + metrics = Class.forName("jdk.internal.platform.Container").getMethod("metrics") + .invoke(null); + if (metrics != null) { + getMetricsProviderMethod = metrics.getClass().getMethod("getProvider"); + getMetricsProviderMethod.setAccessible(true); + getCpuQuotaMethod = metrics.getClass().getMethod("getCpuQuota"); + getCpuQuotaMethod.setAccessible(true); + getCpuPeriodMethod = metrics.getClass().getMethod("getCpuPeriod"); + getCpuPeriodMethod.setAccessible(true); + getCpuUsageMethod = metrics.getClass().getMethod("getCpuUsage"); + getCpuUsageMethod.setAccessible(true); + } + } catch (Throwable e) { + log.warn("Failed to get runtime metrics", e); + } + } /** * Determine whether the OS is the linux kernel. 
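
The static initializer above resolves the JDK's internal container metrics once via reflection; the CPU helpers below then prefer it and fall back to the cgroup v1 files only when it is unavailable. A condensed, self-contained sketch of that lookup-with-fallback (the class name is illustrative, not part of the patch):

    import java.lang.reflect.Method;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class ContainerCpuLimitSketch {
        private static final String QUOTA_PATH = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us";
        private static final String PERIOD_PATH = "/sys/fs/cgroup/cpu/cpu.cfs_period_us";

        public static double totalCpuLimitPercent() {
            try {
                // JDK 11+ exposes container metrics through an internal API; resolve it reflectively.
                Object metrics = Class.forName("jdk.internal.platform.Container")
                        .getMethod("metrics").invoke(null);
                if (metrics != null) {
                    Method quotaM = metrics.getClass().getMethod("getCpuQuota");
                    Method periodM = metrics.getClass().getMethod("getCpuPeriod");
                    quotaM.setAccessible(true);
                    periodM.setAccessible(true);
                    long quota = (long) quotaM.invoke(metrics);
                    long period = (long) periodM.invoke(metrics);
                    if (quota > 0 && period > 0) {
                        return 100.0 * quota / period;
                    }
                }
            } catch (Throwable ignore) {
                // reflection blocked or no container support; fall through to the cgroup v1 files
            }
            try {
                long quota = Long.parseLong(Files.readString(Paths.get(QUOTA_PATH)).trim());
                long period = Long.parseLong(Files.readString(Paths.get(PERIOD_PATH)).trim());
                if (quota > 0 && period > 0) {
                    return 100.0 * quota / period;
                }
            } catch (Exception ignore) {
                // no cgroup limit readable
            }
            // no quota configured: report the full processor count
            return 100.0 * Runtime.getRuntime().availableProcessors();
        }

        public static void main(String[] args) {
            System.out.println("CPU limit: " + totalCpuLimitPercent() + "%");
        }
    }
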
@@ -66,9 +93,14 @@ public static boolean isLinux() { */ public static boolean isCGroupEnabled() { try { - return Files.exists(Paths.get(CGROUPS_CPU_USAGE_PATH)); + if (metrics == null) { + return Files.exists(Paths.get(CGROUPS_CPU_USAGE_PATH)); + } + String provider = (String) getMetricsProviderMethod.invoke(metrics); + log.info("[LinuxInfo] The system metrics provider is: {}", provider); + return provider.contains("cgroup"); } catch (Exception e) { - log.warn("[LinuxInfo] Failed to check cgroup CPU usage file: {}", e.getMessage()); + log.warn("[LinuxInfo] Failed to check cgroup CPU: {}", e.getMessage()); return false; } } @@ -81,13 +113,21 @@ public static boolean isCGroupEnabled() { public static double getTotalCpuLimit(boolean isCGroupsEnabled) { if (isCGroupsEnabled) { try { - long quota = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_QUOTA_PATH)); - long period = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_PERIOD_PATH)); + long quota; + long period; + if (metrics != null && getCpuQuotaMethod != null && getCpuPeriodMethod != null) { + quota = (long) getCpuQuotaMethod.invoke(metrics); + period = (long) getCpuPeriodMethod.invoke(metrics); + } else { + quota = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_QUOTA_PATH)); + period = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_PERIOD_PATH)); + } + if (quota > 0) { return 100.0 * quota / period; } - } catch (IOException e) { - log.warn("[LinuxInfo] Failed to read CPU quotas from cgroups", e); + } catch (Exception e) { + log.warn("[LinuxInfo] Failed to read CPU quotas from cgroup", e); // Fallback to availableProcessors } } @@ -99,11 +139,14 @@ public static double getTotalCpuLimit(boolean isCGroupsEnabled) { * Get CGroup cpu usage. * @return Cpu usage */ - public static double getCpuUsageForCGroup() { + public static long getCpuUsageForCGroup() { try { + if (metrics != null && getCpuUsageMethod != null) { + return (long) getCpuUsageMethod.invoke(metrics); + } return readLongFromFile(Paths.get(CGROUPS_CPU_USAGE_PATH)); - } catch (IOException e) { - log.error("[LinuxInfo] Failed to read CPU usage from {}", CGROUPS_CPU_USAGE_PATH, e); + } catch (Exception e) { + log.error("[LinuxInfo] Failed to read CPU usage from cgroup", e); return -1; } } @@ -118,7 +161,8 @@ public static double getCpuUsageForCGroup() { * *

* Line is split in "words", filtering the first. The sum of all numbers give the amount of cpu cycles used this - * far. Real CPU usage should equal the sum substracting the idle cycles, this would include iowait, irq and steal. + * far. Real CPU usage should equal the sum substracting the idle cycles(that is idle+iowait), this would include + * cpu, user, nice, system, irq, softirq, steal, guest and guest_nice. */ public static ResourceUsage getCpuUsageForEntireHost() { try (Stream stream = Files.lines(Paths.get(PROC_STAT_PATH))) { @@ -132,7 +176,7 @@ public static ResourceUsage getCpuUsageForEntireHost() { .filter(s -> !s.contains("cpu")) .mapToLong(Long::parseLong) .sum(); - long idle = Long.parseLong(words[4]); + long idle = Long.parseLong(words[4]) + Long.parseLong(words[5]); return ResourceUsage.builder() .usage(total - idle) .idle(idle) @@ -291,6 +335,11 @@ enum Operstate { UP } + @VisibleForTesting + public static Object getMetrics() { + return metrics; + } + @AllArgsConstructor public enum NICUsageType { // transport diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java index 87f630f1a09fb..c1fe2a4930c34 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java @@ -64,7 +64,7 @@ public Map getBundleData() { public Map getBundleDataForLoadShedding() { return bundleData.entrySet().stream() - .filter(e -> !NamespaceService.isSystemServiceNamespace( + .filter(e -> !NamespaceService.isSLAOrHeartbeatNamespace( NamespaceBundle.getBundleNamespace(e.getKey()))) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java index 0de2ae92db61a..f9f36b705d4c4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java @@ -43,7 +43,7 @@ public class NoopLoadManager implements LoadManager { private PulsarService pulsar; - private String lookupServiceAddress; + private String brokerId; private ResourceUnit localResourceUnit; private LockManager lockManager; private Map bundleBrokerAffinityMap; @@ -57,16 +57,15 @@ public void initialize(PulsarService pulsar) { @Override public void start() throws PulsarServerException { - lookupServiceAddress = pulsar.getLookupServiceAddress(); - localResourceUnit = new SimpleResourceUnit(String.format("http://%s", lookupServiceAddress), - new PulsarResourceDescription()); + brokerId = pulsar.getBrokerId(); + localResourceUnit = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); - LocalBrokerData localData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), + LocalBrokerData localData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); localData.setProtocols(pulsar.getProtocolDataToAdvertise()); localData.setLoadManagerClassName(this.pulsar.getConfig().getLoadManagerClassName()); - String brokerReportPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerReportPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + brokerId; try { 
log.info("Acquiring broker resource lock on {}", brokerReportPath); @@ -129,12 +128,12 @@ public void disableBroker() throws Exception { @Override public Set getAvailableBrokers() throws Exception { - return Collections.singleton(lookupServiceAddress); + return Collections.singleton(brokerId); } @Override public CompletableFuture> getAvailableBrokersAsync() { - return CompletableFuture.completedFuture(Collections.singleton(lookupServiceAddress)); + return CompletableFuture.completedFuture(Collections.singleton(brokerId)); } @Override @@ -153,7 +152,6 @@ public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java index ef4dd2a97b280..c28a8be4c0d3a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java @@ -23,8 +23,6 @@ */ public interface ResourceUnit extends Comparable { - String PROPERTY_KEY_BROKER_ZNODE_NAME = "__advertised_addr"; - String getResourceId(); ResourceDescription getAvailableResource(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java index 921ce35b5c65e..18e30ddf922d0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java @@ -82,9 +82,9 @@ public BrokerRegistryImpl(PulsarService pulsar) { this.brokerLookupDataLockManager = pulsar.getCoordinationService().getLockManager(BrokerLookupData.class); this.scheduler = pulsar.getLoadManagerExecutor(); this.listeners = new ArrayList<>(); - this.brokerId = pulsar.getLookupServiceAddress(); + this.brokerId = pulsar.getBrokerId(); this.brokerLookupData = new BrokerLookupData( - pulsar.getSafeWebServiceAddress(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java index 4ebf537f7a8a8..6a0e677c66268 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java @@ -21,31 +21,40 @@ import static java.lang.String.format; import static org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl.Role.Follower; import static org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl.Role.Leader; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.TOPIC; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Label.Success; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Admin; +import static 
org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.getNamespaceBundle; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.LeaderElectionService; import org.apache.pulsar.broker.loadbalance.LoadManager; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannel; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLoadData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.extensions.data.TopBundlesLoadData; @@ -78,6 +87,9 @@ import org.apache.pulsar.broker.loadbalance.extensions.strategy.LeastResourceUsageWithWeight; import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceAllocationPolicies; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; import org.apache.pulsar.common.naming.NamespaceName; @@ -86,7 +98,6 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; import org.slf4j.Logger; @@ -107,6 +118,8 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { private static final long MONITOR_INTERVAL_IN_MILLIS = 120_000; + public static final long COMPACTION_THRESHOLD = 5 * 1024 * 1024; + private static final String ELECTION_ROOT = "/loadbalance/extension/leader"; private PulsarService pulsar; @@ -143,6 +156,7 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { @Getter private final List brokerFilterPipeline; + /** * The load data reporter. 
*/ @@ -160,7 +174,9 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { private SplitManager splitManager; - private boolean started = false; + private volatile boolean started = false; + + private boolean configuredSystemTopics = false; private final AssignCounter assignCounter = new AssignCounter(); @Getter @@ -168,16 +184,66 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { private final SplitCounter splitCounter = new SplitCounter(); // record unload metrics - private final AtomicReference> unloadMetrics = new AtomicReference(); + private final AtomicReference> unloadMetrics = new AtomicReference<>(); // record split metrics private final AtomicReference> splitMetrics = new AtomicReference<>(); - private final ConcurrentOpenHashMap>> - lookupRequests = ConcurrentOpenHashMap.>>newBuilder() - .build(); + private final ConcurrentHashMap>> + lookupRequests = new ConcurrentHashMap<>(); private final CountDownLatch initWaiter = new CountDownLatch(1); + /** + * Get all the bundles that are owned by this broker. + */ + public Set getOwnedServiceUnits() { + if (!started) { + log.warn("Failed to get owned service units, load manager is not started."); + return Collections.emptySet(); + } + Set> entrySet = serviceUnitStateChannel.getOwnershipEntrySet(); + String brokerId = brokerRegistry.getBrokerId(); + Set ownedServiceUnits = entrySet.stream() + .filter(entry -> { + var stateData = entry.getValue(); + return stateData.state() == ServiceUnitState.Owned + && StringUtils.isNotBlank(stateData.dstBroker()) + && stateData.dstBroker().equals(brokerId); + }).map(entry -> { + var bundle = entry.getKey(); + return getNamespaceBundle(pulsar, bundle); + }).collect(Collectors.toSet()); + // Add heartbeat and SLA monitor namespace bundle. 
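
getOwnedServiceUnits() above derives ownership purely from the service unit state channel (state Owned with dstBroker equal to the local broker id) and then adds the heartbeat and SLA monitor bundles. A small hypothetical caller, assuming a started ExtensibleLoadManagerImpl instance:

    import java.util.Set;
    import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl;
    import org.apache.pulsar.common.naming.NamespaceBundle;

    public class OwnedBundlesProbe {
        // Log every bundle the local broker currently owns according to the load manager.
        public static void logOwned(ExtensibleLoadManagerImpl loadManager) {
            Set<NamespaceBundle> owned = loadManager.getOwnedServiceUnits();
            owned.forEach(bundle -> System.out.println("Locally owned bundle: " + bundle));
        }
    }
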
+ NamespaceName heartbeatNamespace = NamespaceService.getHeartbeatNamespace(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(heartbeatNamespace); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get heartbeat namespace bundle.", e); + } + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(heartbeatNamespaceV2); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get heartbeat namespace V2 bundle.", e); + } + + NamespaceName slaMonitorNamespace = NamespaceService + .getSLAMonitorNamespace(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(slaMonitorNamespace); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get SLA Monitor namespace bundle.", e); + } + + return ownedServiceUnits; + } + public enum Role { Leader, Follower @@ -201,6 +267,10 @@ public static boolean isLoadManagerExtensionEnabled(ServiceConfiguration conf) { return ExtensibleLoadManagerImpl.class.getName().equals(conf.getLoadManagerClassName()); } + public static boolean isLoadManagerExtensionEnabled(PulsarService pulsar) { + return pulsar.getLoadManager().get() instanceof ExtensibleLoadManagerWrapper; + } + public static ExtensibleLoadManagerImpl get(LoadManager loadManager) { if (!(loadManager instanceof ExtensibleLoadManagerWrapper loadManagerWrapper)) { throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'."); @@ -212,104 +282,151 @@ public static boolean debug(ServiceConfiguration config, Logger log) { return config.isLoadBalancerDebugModeEnabled() || log.isDebugEnabled(); } + public static void createSystemTopic(PulsarService pulsar, String topic) throws PulsarServerException { + try { + pulsar.getAdminClient().topics().createNonPartitionedTopic(topic); + log.info("Created topic {}.", topic); + } catch (PulsarAdminException.ConflictException ex) { + if (debug(pulsar.getConfiguration(), log)) { + log.info("Topic {} already exists.", topic); + } + } catch (PulsarAdminException e) { + throw new PulsarServerException(e); + } + } + + private static void createSystemTopics(PulsarService pulsar) throws PulsarServerException { + createSystemTopic(pulsar, BROKER_LOAD_DATA_STORE_TOPIC); + createSystemTopic(pulsar, TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); + } + + private static boolean configureSystemTopics(PulsarService pulsar) { + try { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(pulsar) + && (pulsar.getConfiguration().isSystemTopicEnabled() + && pulsar.getConfiguration().isTopicLevelPoliciesEnabled())) { + Long threshold = pulsar.getAdminClient().topicPolicies().getCompactionThreshold(TOPIC); + if (threshold == null || COMPACTION_THRESHOLD != threshold.longValue()) { + pulsar.getAdminClient().topicPolicies().setCompactionThreshold(TOPIC, COMPACTION_THRESHOLD); + log.info("Set compaction threshold: {} bytes for system topic {}.", COMPACTION_THRESHOLD, TOPIC); + } + } else { + log.warn("System topic or topic level policies is disabled. 
" + + "{} compaction threshold follows the broker or namespace policies.", TOPIC); + } + return true; + } catch (Exception e) { + log.error("Failed to set compaction threshold for system topic:{}", TOPIC, e); + } + return false; + } + @Override public void start() throws PulsarServerException { if (this.started) { return; } - this.brokerRegistry = new BrokerRegistryImpl(pulsar); - this.leaderElectionService = new LeaderElectionService( - pulsar.getCoordinationService(), pulsar.getSafeWebServiceAddress(), ELECTION_ROOT, - state -> { - pulsar.getLoadManagerExecutor().execute(() -> { - if (state == LeaderElectionState.Leading) { - playLeader(); - } else { - playFollower(); - } + try { + this.brokerRegistry = new BrokerRegistryImpl(pulsar); + this.leaderElectionService = new LeaderElectionService( + pulsar.getCoordinationService(), pulsar.getBrokerId(), + pulsar.getSafeWebServiceAddress(), ELECTION_ROOT, + state -> { + pulsar.getLoadManagerExecutor().execute(() -> { + if (state == LeaderElectionState.Leading) { + playLeader(); + } else { + playFollower(); + } + }); }); - }); - this.serviceUnitStateChannel = new ServiceUnitStateChannelImpl(pulsar); - this.brokerRegistry.start(); - this.splitManager = new SplitManager(splitCounter); - this.unloadManager = new UnloadManager(unloadCounter); - this.serviceUnitStateChannel.listen(unloadManager); - this.serviceUnitStateChannel.listen(splitManager); - this.leaderElectionService.start(); - this.serviceUnitStateChannel.start(); - this.antiAffinityGroupPolicyHelper = - new AntiAffinityGroupPolicyHelper(pulsar, serviceUnitStateChannel); - antiAffinityGroupPolicyHelper.listenFailureDomainUpdate(); - this.antiAffinityGroupPolicyFilter = new AntiAffinityGroupPolicyFilter(antiAffinityGroupPolicyHelper); - this.brokerFilterPipeline.add(antiAffinityGroupPolicyFilter); - SimpleResourceAllocationPolicies policies = new SimpleResourceAllocationPolicies(pulsar); - this.isolationPoliciesHelper = new IsolationPoliciesHelper(policies); - this.brokerFilterPipeline.add(new BrokerIsolationPoliciesFilter(isolationPoliciesHelper)); + this.serviceUnitStateChannel = ServiceUnitStateChannelImpl.newInstance(pulsar); + this.brokerRegistry.start(); + this.splitManager = new SplitManager(splitCounter); + this.unloadManager = new UnloadManager(unloadCounter); + this.serviceUnitStateChannel.listen(unloadManager); + this.serviceUnitStateChannel.listen(splitManager); + this.leaderElectionService.start(); + this.serviceUnitStateChannel.start(); + this.antiAffinityGroupPolicyHelper = + new AntiAffinityGroupPolicyHelper(pulsar, serviceUnitStateChannel); + antiAffinityGroupPolicyHelper.listenFailureDomainUpdate(); + this.antiAffinityGroupPolicyFilter = new AntiAffinityGroupPolicyFilter(antiAffinityGroupPolicyHelper); + this.brokerFilterPipeline.add(antiAffinityGroupPolicyFilter); + SimpleResourceAllocationPolicies policies = new SimpleResourceAllocationPolicies(pulsar); + this.isolationPoliciesHelper = new IsolationPoliciesHelper(policies); + this.brokerFilterPipeline.add(new BrokerIsolationPoliciesFilter(isolationPoliciesHelper)); - try { - this.brokerLoadDataStore = LoadDataStoreFactory - .create(pulsar.getClient(), BROKER_LOAD_DATA_STORE_TOPIC, BrokerLoadData.class); - this.brokerLoadDataStore.startTableView(); - this.topBundlesLoadDataStore = LoadDataStoreFactory - .create(pulsar.getClient(), TOP_BUNDLES_LOAD_DATA_STORE_TOPIC, TopBundlesLoadData.class); - } catch (LoadDataStoreException e) { - throw new PulsarServerException(e); - } + try { + this.brokerLoadDataStore = 
LoadDataStoreFactory + .create(pulsar, BROKER_LOAD_DATA_STORE_TOPIC, BrokerLoadData.class); + this.topBundlesLoadDataStore = LoadDataStoreFactory + .create(pulsar, TOP_BUNDLES_LOAD_DATA_STORE_TOPIC, TopBundlesLoadData.class); + } catch (LoadDataStoreException e) { + throw new PulsarServerException(e); + } - this.context = LoadManagerContextImpl.builder() - .configuration(conf) - .brokerRegistry(brokerRegistry) - .brokerLoadDataStore(brokerLoadDataStore) - .topBundleLoadDataStore(topBundlesLoadDataStore).build(); - - this.brokerLoadDataReporter = - new BrokerLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), brokerLoadDataStore); - - this.topBundleLoadDataReporter = - new TopBundleLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), topBundlesLoadDataStore); - this.serviceUnitStateChannel.listen(brokerLoadDataReporter); - this.serviceUnitStateChannel.listen(topBundleLoadDataReporter); - var interval = conf.getLoadBalancerReportUpdateMinIntervalMillis(); - this.brokerLoadDataReportTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - try { - brokerLoadDataReporter.reportAsync(false); - // TODO: update broker load metrics using getLocalData - } catch (Throwable e) { - log.error("Failed to run the broker load manager executor job.", e); - } - }, - interval, - interval, TimeUnit.MILLISECONDS); - - this.topBundlesLoadDataReportTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - try { - // TODO: consider excluding the bundles that are in the process of split. - topBundleLoadDataReporter.reportAsync(false); - } catch (Throwable e) { - log.error("Failed to run the top bundles load manager executor job.", e); - } - }, - interval, - interval, TimeUnit.MILLISECONDS); - - this.monitorTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - monitor(); - }, - MONITOR_INTERVAL_IN_MILLIS, - MONITOR_INTERVAL_IN_MILLIS, TimeUnit.MILLISECONDS); - - this.unloadScheduler = new UnloadScheduler( - pulsar, pulsar.getLoadManagerExecutor(), unloadManager, context, - serviceUnitStateChannel, unloadCounter, unloadMetrics); - this.unloadScheduler.start(); - this.splitScheduler = new SplitScheduler( - pulsar, serviceUnitStateChannel, splitManager, splitCounter, splitMetrics, context); - this.splitScheduler.start(); - this.initWaiter.countDown(); - this.started = true; + this.context = LoadManagerContextImpl.builder() + .configuration(conf) + .brokerRegistry(brokerRegistry) + .brokerLoadDataStore(brokerLoadDataStore) + .topBundleLoadDataStore(topBundlesLoadDataStore).build(); + + this.brokerLoadDataReporter = + new BrokerLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), brokerLoadDataStore); + + this.topBundleLoadDataReporter = + new TopBundleLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), topBundlesLoadDataStore); + this.serviceUnitStateChannel.listen(brokerLoadDataReporter); + this.serviceUnitStateChannel.listen(topBundleLoadDataReporter); + var interval = conf.getLoadBalancerReportUpdateMinIntervalMillis(); + this.brokerLoadDataReportTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + try { + brokerLoadDataReporter.reportAsync(false); + // TODO: update broker load metrics using getLocalData + } catch (Throwable e) { + log.error("Failed to run the broker load manager executor job.", e); + } + }, + interval, + interval, TimeUnit.MILLISECONDS); + + this.topBundlesLoadDataReportTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + try { + // TODO: consider excluding the bundles that are in 
the process of split. + topBundleLoadDataReporter.reportAsync(false); + } catch (Throwable e) { + log.error("Failed to run the top bundles load manager executor job.", e); + } + }, + interval, + interval, TimeUnit.MILLISECONDS); + + this.monitorTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + monitor(); + }, + MONITOR_INTERVAL_IN_MILLIS, + MONITOR_INTERVAL_IN_MILLIS, TimeUnit.MILLISECONDS); + + this.unloadScheduler = new UnloadScheduler( + pulsar, pulsar.getLoadManagerExecutor(), unloadManager, context, + serviceUnitStateChannel, unloadCounter, unloadMetrics); + this.splitScheduler = new SplitScheduler( + pulsar, serviceUnitStateChannel, splitManager, splitCounter, splitMetrics, context); + this.splitScheduler.start(); + this.initWaiter.countDown(); + this.started = true; + log.info("Started load manager."); + } catch (Exception ex) { + log.error("Failed to start the extensible load balance and close broker registry {}.", + this.brokerRegistry, ex); + if (this.brokerRegistry != null) { + brokerRegistry.close(); + } + } } @Override @@ -324,89 +441,167 @@ public CompletableFuture> assign(Optional> future = lookupRequests.computeIfAbsent(bundle, k -> { + return dedupeLookupRequest(bundle, k -> { final CompletableFuture> owner; // Assign the bundle to channel owner if is internal topic, to avoid circular references. if (topic.isPresent() && isInternalTopic(topic.get().toString())) { owner = serviceUnitStateChannel.getChannelOwnerAsync(); } else { - owner = serviceUnitStateChannel.getOwnerAsync(bundle).thenCompose(broker -> { - // If the bundle not assign yet, select and publish assign event to channel. - if (broker.isEmpty()) { - return this.selectAsync(serviceUnit).thenCompose(brokerOpt -> { - if (brokerOpt.isPresent()) { - assignCounter.incrementSuccess(); - log.info("Selected new owner broker: {} for bundle: {}.", brokerOpt.get(), bundle); - return serviceUnitStateChannel.publishAssignEventAsync(bundle, brokerOpt.get()) - .thenApply(Optional::of); - } else { - throw new IllegalStateException( - "Failed to select the new owner broker for bundle: " + bundle); - } - }); + String candidateBrokerId = getHeartbeatOrSLAMonitorBrokerId(serviceUnit); + if (candidateBrokerId != null) { + owner = CompletableFuture.completedFuture(Optional.of(candidateBrokerId)); + } else { + owner = getOrSelectOwnerAsync(serviceUnit, bundle).thenApply(Optional::ofNullable); + } + } + return getBrokerLookupData(owner, bundle); + }); + } + + private String getHeartbeatOrSLAMonitorBrokerId(ServiceUnitId serviceUnit) { + // Check if this is Heartbeat or SLAMonitor namespace + String candidateBroker = NamespaceService.checkHeartbeatNamespace(serviceUnit); + if (candidateBroker == null) { + candidateBroker = NamespaceService.checkHeartbeatNamespaceV2(serviceUnit); + } + if (candidateBroker == null) { + candidateBroker = NamespaceService.getSLAMonitorBrokerName(serviceUnit); + } + if (candidateBroker != null) { + return candidateBroker.substring(candidateBroker.lastIndexOf('/') + 1); + } + return candidateBroker; + } + + private CompletableFuture getOrSelectOwnerAsync(ServiceUnitId serviceUnit, + String bundle) { + return serviceUnitStateChannel.getOwnerAsync(bundle).thenCompose(broker -> { + // If the bundle not assign yet, select and publish assign event to channel. 
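+ // assignCounter records a success when a new owner has to be selected and a skip when the channel already has an owner.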
+ if (broker.isEmpty()) { + return this.selectAsync(serviceUnit).thenCompose(brokerOpt -> { + if (brokerOpt.isPresent()) { + assignCounter.incrementSuccess(); + log.info("Selected new owner broker: {} for bundle: {}.", brokerOpt.get(), bundle); + return serviceUnitStateChannel.publishAssignEventAsync(bundle, brokerOpt.get()); } - assignCounter.incrementSkip(); - // Already assigned, return it. - return CompletableFuture.completedFuture(broker); + throw new IllegalStateException( + "Failed to select the new owner broker for bundle: " + bundle); }); } - - return owner.thenCompose(broker -> { - if (broker.isEmpty()) { - String errorMsg = String.format( - "Failed to get or assign the owner for bundle:%s", bundle); - log.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - return CompletableFuture.completedFuture(broker.get()); - }).thenCompose(broker -> this.getBrokerRegistry().lookupAsync(broker).thenCompose(brokerLookupData -> { - if (brokerLookupData.isEmpty()) { - String errorMsg = String.format( - "Failed to look up a broker registry:%s for bundle:%s", broker, bundle); - log.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - return CompletableFuture.completedFuture(brokerLookupData); - })); + assignCounter.incrementSkip(); + // Already assigned, return it. + return CompletableFuture.completedFuture(broker.get()); }); - future.whenComplete((r, t) -> { - if (t != null) { + } + + private CompletableFuture> getBrokerLookupData( + CompletableFuture> owner, + String bundle) { + return owner.thenCompose(broker -> { + if (broker.isEmpty()) { + String errorMsg = String.format( + "Failed to get or assign the owner for bundle:%s", bundle); + log.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + return CompletableFuture.completedFuture(broker.get()); + }).thenCompose(broker -> this.getBrokerRegistry().lookupAsync(broker).thenCompose(brokerLookupData -> { + if (brokerLookupData.isEmpty()) { + String errorMsg = String.format( + "Failed to lookup broker:%s for bundle:%s, the broker has not been registered.", + broker, bundle); + log.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + return CompletableFuture.completedFuture(brokerLookupData); + })); + } + + /** + * Method to get the current owner of the NamespaceBundle + * or set the local broker as the owner if absent. 
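+ * Internally this delegates to the same assign path, so concurrent ownership requests for the same bundle are deduplicated.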
+ * + * @param namespaceBundle the NamespaceBundle + * @return The ephemeral node data showing the current ownership info in ServiceUnitStateChannel + */ + public CompletableFuture tryAcquiringOwnership(NamespaceBundle namespaceBundle) { + log.info("Try acquiring ownership for bundle: {} - {}.", namespaceBundle, brokerRegistry.getBrokerId()); + final String bundle = namespaceBundle.toString(); + return assign(Optional.empty(), namespaceBundle) + .thenApply(brokerLookupData -> { + if (brokerLookupData.isEmpty()) { + String errorMsg = String.format( + "Failed to get the broker lookup data for bundle:%s", bundle); + log.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + return brokerLookupData.get().toNamespaceEphemeralData(); + }); + } + + private CompletableFuture> dedupeLookupRequest( + String key, Function>> provider) { + final MutableObject>> newFutureCreated = new MutableObject<>(); + try { + return lookupRequests.computeIfAbsent(key, k -> { + CompletableFuture> future = provider.apply(k); + newFutureCreated.setValue(future); + return future; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + if (ex != null) { assignCounter.incrementFailure(); } - lookupRequests.remove(bundle); - } - ); - return future; + lookupRequests.remove(key); + }); + } + } } public CompletableFuture> selectAsync(ServiceUnitId bundle) { + return selectAsync(bundle, Collections.emptySet()); + } + + public CompletableFuture> selectAsync(ServiceUnitId bundle, + Set excludeBrokerSet) { BrokerRegistry brokerRegistry = getBrokerRegistry(); return brokerRegistry.getAvailableBrokerLookupDataAsync() - .thenCompose(availableBrokers -> { + .thenComposeAsync(availableBrokers -> { LoadManagerContext context = this.getContext(); - Map availableBrokerCandidates = new HashMap<>(availableBrokers); + Map availableBrokerCandidates = new ConcurrentHashMap<>(availableBrokers); + if (!excludeBrokerSet.isEmpty()) { + for (String exclude : excludeBrokerSet) { + availableBrokerCandidates.remove(exclude); + } + } // Filter out brokers that do not meet the rules. List filterPipeline = getBrokerFilterPipeline(); + ArrayList>> futures = + new ArrayList<>(filterPipeline.size()); for (final BrokerFilter filter : filterPipeline) { - try { - filter.filter(availableBrokerCandidates, bundle, context); - // Preserve the filter successes result. - availableBrokers.keySet().retainAll(availableBrokerCandidates.keySet()); - } catch (BrokerFilterException e) { + CompletableFuture> future = + filter.filterAsync(availableBrokerCandidates, bundle, context); + futures.add(future); + } + CompletableFuture> result = new CompletableFuture<>(); + FutureUtil.waitForAll(futures).whenComplete((__, ex) -> { + if (ex != null) { // TODO: We may need to revisit this error case. 
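+ // A filter failure is logged but not fatal: selection continues with whatever candidates remain in the map.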
- log.error("Failed to filter out brokers.", e); - availableBrokerCandidates = new HashMap<>(availableBrokers); + log.error("Failed to filter out brokers when select bundle: {}", bundle, ex); } - } - if (availableBrokerCandidates.isEmpty()) { - return CompletableFuture.completedFuture(Optional.empty()); - } - Set candidateBrokers = availableBrokerCandidates.keySet(); + if (availableBrokerCandidates.isEmpty()) { + result.complete(Optional.empty()); + return; + } + Set candidateBrokers = availableBrokerCandidates.keySet(); - return CompletableFuture.completedFuture( - getBrokerSelectionStrategy().select(candidateBrokers, bundle, context)); + result.complete(getBrokerSelectionStrategy().select(candidateBrokers, bundle, context)); + }); + return result; }); } @@ -417,15 +612,16 @@ public CompletableFuture checkOwnershipAsync(Optional to } public CompletableFuture> getOwnershipAsync(Optional topic, - ServiceUnitId bundleUnit) { - final String bundle = bundleUnit.toString(); - CompletableFuture> owner; + ServiceUnitId serviceUnit) { + final String bundle = serviceUnit.toString(); if (topic.isPresent() && isInternalTopic(topic.get().toString())) { - owner = serviceUnitStateChannel.getChannelOwnerAsync(); - } else { - owner = serviceUnitStateChannel.getOwnerAsync(bundle); + return serviceUnitStateChannel.getChannelOwnerAsync(); } - return owner; + String candidateBroker = getHeartbeatOrSLAMonitorBrokerId(serviceUnit); + if (candidateBroker != null) { + return CompletableFuture.completedFuture(Optional.of(candidateBroker)); + } + return serviceUnitStateChannel.getOwnerAsync(bundle); } public CompletableFuture> getOwnershipWithLookupDataAsync(ServiceUnitId bundleUnit) { @@ -439,6 +635,10 @@ public CompletableFuture> getOwnershipWithLookupDataA public CompletableFuture unloadNamespaceBundleAsync(ServiceUnitId bundle, Optional destinationBroker) { + if (NamespaceService.isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + log.info("Skip unloading namespace bundle: {}.", bundle); + return CompletableFuture.completedFuture(null); + } return getOwnershipAsync(Optional.empty(), bundle) .thenCompose(brokerOpt -> { if (brokerOpt.isEmpty()) { @@ -473,6 +673,10 @@ private CompletableFuture unloadAsync(UnloadDecision unloadDecision, public CompletableFuture splitNamespaceBundleAsync(ServiceUnitId bundle, NamespaceBundleSplitAlgorithm splitAlgorithm, List boundaries) { + if (NamespaceService.isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + log.info("Skip split namespace bundle: {}.", bundle); + return CompletableFuture.completedFuture(null); + } final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle.toString()); final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle.toString()); NamespaceBundle namespaceBundle = @@ -562,82 +766,106 @@ public void close() throws PulsarServerException { } } - private boolean isInternalTopic(String topic) { - return topic.startsWith(ServiceUnitStateChannelImpl.TOPIC) + public static boolean isInternalTopic(String topic) { + return topic.startsWith(TOPIC) || topic.startsWith(BROKER_LOAD_DATA_STORE_TOPIC) || topic.startsWith(TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); } @VisibleForTesting - void playLeader() { - if (role != Leader) { - log.info("This broker:{} is changing the role from {} to {}", - pulsar.getLookupServiceAddress(), role, Leader); - int retry = 0; - while (true) { - try { - initWaiter.await(); - serviceUnitStateChannel.scheduleOwnershipMonitor(); - 
topBundlesLoadDataStore.startTableView(); - unloadScheduler.start(); + synchronized void playLeader() { + log.info("This broker:{} is setting the role from {} to {}", + pulsar.getBrokerId(), role, Leader); + int retry = 0; + boolean becameFollower = false; + while (!Thread.currentThread().isInterrupted()) { + try { + if (!serviceUnitStateChannel.isChannelOwner()) { + becameFollower = true; break; - } catch (Throwable e) { - log.error("The broker:{} failed to change the role. Retrying {} th ...", - pulsar.getLookupServiceAddress(), ++retry, e); - try { - Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); - } catch (InterruptedException ex) { - log.warn("Interrupted while sleeping."); - } + } + initWaiter.await(); + // Confirm the system topics have been created or create them if they do not exist. + // If the leader has changed, the new leader need to reset + // the local brokerService.topics (by this topic creations). + // Otherwise, the system topic existence check will fail on the leader broker. + createSystemTopics(pulsar); + brokerLoadDataStore.init(); + topBundlesLoadDataStore.init(); + unloadScheduler.start(); + serviceUnitStateChannel.scheduleOwnershipMonitor(); + break; + } catch (Throwable e) { + log.warn("The broker:{} failed to set the role. Retrying {} th ...", + pulsar.getBrokerId(), ++retry, e); + try { + Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); + } catch (InterruptedException ex) { + log.warn("Interrupted while sleeping."); + // preserve thread's interrupt status + Thread.currentThread().interrupt(); } } - role = Leader; - log.info("This broker:{} plays the leader now.", pulsar.getLookupServiceAddress()); } - // flush the load data when the leader is elected. - if (brokerLoadDataReporter != null) { - brokerLoadDataReporter.reportAsync(true); - } - if (topBundleLoadDataReporter != null) { - topBundleLoadDataReporter.reportAsync(true); + if (becameFollower) { + log.warn("The broker:{} became follower while initializing leader role.", pulsar.getBrokerId()); + playFollower(); + return; } + + role = Leader; + log.info("This broker:{} plays the leader now.", pulsar.getBrokerId()); + + // flush the load data when the leader is elected. + brokerLoadDataReporter.reportAsync(true); + topBundleLoadDataReporter.reportAsync(true); } @VisibleForTesting - void playFollower() { - if (role != Follower) { - log.info("This broker:{} is changing the role from {} to {}", - pulsar.getLookupServiceAddress(), role, Follower); - int retry = 0; - while (true) { - try { - initWaiter.await(); - serviceUnitStateChannel.cancelOwnershipMonitor(); - topBundlesLoadDataStore.closeTableView(); - unloadScheduler.close(); + synchronized void playFollower() { + log.info("This broker:{} is setting the role from {} to {}", + pulsar.getBrokerId(), role, Follower); + int retry = 0; + boolean becameLeader = false; + while (!Thread.currentThread().isInterrupted()) { + try { + if (serviceUnitStateChannel.isChannelOwner()) { + becameLeader = true; break; - } catch (Throwable e) { - log.error("The broker:{} failed to change the role. 
Retrying {} th ...", - pulsar.getLookupServiceAddress(), ++retry, e); - try { - Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); - } catch (InterruptedException ex) { - log.warn("Interrupted while sleeping."); - } + } + initWaiter.await(); + unloadScheduler.close(); + serviceUnitStateChannel.cancelOwnershipMonitor(); + brokerLoadDataStore.init(); + topBundlesLoadDataStore.close(); + topBundlesLoadDataStore.startProducer(); + break; + } catch (Throwable e) { + log.warn("The broker:{} failed to set the role. Retrying {} th ...", + pulsar.getBrokerId(), ++retry, e); + try { + Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); + } catch (InterruptedException ex) { + log.warn("Interrupted while sleeping."); + // preserve thread's interrupt status + Thread.currentThread().interrupt(); } } - role = Follower; - log.info("This broker:{} plays a follower now.", pulsar.getLookupServiceAddress()); } - // flush the load data when the leader is elected. - if (brokerLoadDataReporter != null) { - brokerLoadDataReporter.reportAsync(true); - } - if (topBundleLoadDataReporter != null) { - topBundleLoadDataReporter.reportAsync(true); + if (becameLeader) { + log.warn("This broker:{} became leader while initializing follower role.", pulsar.getBrokerId()); + playLeader(); + return; } + + role = Follower; + log.info("This broker:{} plays a follower now.", pulsar.getBrokerId()); + + // flush the load data when the leader is elected. + brokerLoadDataReporter.reportAsync(true); + topBundleLoadDataReporter.reportAsync(true); } public List getMetrics() { @@ -661,7 +889,9 @@ public List getMetrics() { return metricsCollection; } - private void monitor() { + + @VisibleForTesting + protected void monitor() { try { initWaiter.await(); @@ -669,6 +899,11 @@ private void monitor() { // Periodically check the role in case ZK watcher fails. var isChannelOwner = serviceUnitStateChannel.isChannelOwner(); if (isChannelOwner) { + // System topic config might fail due to the race condition + // with topic policy init(Topic policies cache have not init). + if (!configuredSystemTopics) { + configuredSystemTopics = configureSystemTopics(pulsar); + } if (role != Leader) { log.warn("Current role:{} does not match with the channel ownership:{}. 
" + "Playing the leader role.", role, isChannelOwner); @@ -685,4 +920,10 @@ private void monitor() { log.error("Failed to get the channel ownership.", e); } } + + public void disableBroker() throws Exception { + serviceUnitStateChannel.cleanOwnerships(); + leaderElectionService.close(); + brokerRegistry.unregister(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java index 1eabbe620e213..cd1561cb70e2d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java @@ -74,7 +74,7 @@ public CompletableFuture checkOwnershipAsync(Optional to @Override public void disableBroker() throws Exception { - this.loadManager.getBrokerRegistry().unregister(); + this.loadManager.disableBroker(); } @Override @@ -118,13 +118,11 @@ public void setLoadReportForceUpdateFlag() { @Override public void writeLoadReportOnZookeeper() throws Exception { // No-op, this operation is not useful, the load data reporter will automatically write. - throw new UnsupportedOperationException(); } @Override public void writeResourceQuotasToZooKeeper() throws Exception { // No-op, this operation is not useful, the load data reporter will automatically write. - throw new UnsupportedOperationException(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannel.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannel.java index 4782a31fe0c56..6e75fe91a914f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannel.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannel.java @@ -206,4 +206,9 @@ public interface ServiceUnitStateChannel extends Closeable { * Cancels the ownership monitor. */ void cancelOwnershipMonitor(); + + /** + * Cleans the service unit ownerships from the current broker's channel. 
+ */ + void cleanOwnerships(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java index bd62c53be6083..f66ed2a5c9062 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java @@ -42,6 +42,7 @@ import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.MetadataState.Unstable; import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData.state; import static org.apache.pulsar.common.naming.NamespaceName.SYSTEM_NAMESPACE; +import static org.apache.pulsar.common.topics.TopicCompactionStrategy.TABLE_VIEW_TAG; import static org.apache.pulsar.metadata.api.extended.SessionEvent.SessionLost; import static org.apache.pulsar.metadata.api.extended.SessionEvent.SessionReestablished; import com.google.common.annotations.VisibleForTesting; @@ -53,6 +54,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ScheduledFuture; @@ -66,10 +68,12 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.mutable.MutableInt; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.pulsar.PulsarClusterMetadataSetup; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.loadbalance.LeaderBroker; import org.apache.pulsar.broker.loadbalance.LeaderElectionService; import org.apache.pulsar.broker.loadbalance.extensions.BrokerRegistry; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; @@ -81,6 +85,7 @@ import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerServiceException; +import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; @@ -90,12 +95,11 @@ import org.apache.pulsar.common.naming.NamespaceBundleFactory; import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; import org.apache.pulsar.common.naming.NamespaceBundles; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.stats.Metrics; +import org.apache.pulsar.common.topics.TopicCompactionStrategy; import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.SessionEvent; @@ -106,7 +110,13 @@ public class ServiceUnitStateChannelImpl implements ServiceUnitStateChannel { TopicDomain.persistent.value(), SYSTEM_NAMESPACE, 
"loadbalancer-service-unit-state").toString(); + + public static final CompressionType MSG_COMPRESSION_TYPE = CompressionType.ZSTD; private static final long MAX_IN_FLIGHT_STATE_WAITING_TIME_IN_MILLIS = 30 * 1000; // 30sec + + private static final int OWNERSHIP_CLEAN_UP_MAX_WAIT_TIME_IN_MILLIS = 5000; + private static final int OWNERSHIP_CLEAN_UP_WAIT_RETRY_DELAY_IN_MILLIS = 100; + private static final int OWNERSHIP_CLEAN_UP_CONVERGENCE_DELAY_IN_MILLIS = 3000; public static final long VERSION_ID_INIT = 1; // initial versionId private static final long OWNERSHIP_MONITOR_DELAY_TIME_IN_SECS = 60; public static final long MAX_CLEAN_UP_DELAY_TIME_IN_SECS = 3 * 60; // 3 mins @@ -117,9 +127,9 @@ public class ServiceUnitStateChannelImpl implements ServiceUnitStateChannel { private final PulsarService pulsar; private final ServiceConfiguration config; private final Schema schema; - private final ConcurrentOpenHashMap> getOwnerRequests; - private final String lookupServiceAddress; - private final ConcurrentOpenHashMap> cleanupJobs; + private final Map> getOwnerRequests; + private final String brokerId; + private final Map> cleanupJobs; private final StateChangeListeners stateChangeListeners; private ExtensibleLoadManagerImpl loadManager; private BrokerRegistry brokerRegistry; @@ -191,19 +201,29 @@ enum MetadataState { Unstable } + public static ServiceUnitStateChannelImpl newInstance(PulsarService pulsar) { + return new ServiceUnitStateChannelImpl(pulsar); + } + public ServiceUnitStateChannelImpl(PulsarService pulsar) { + this(pulsar, MAX_IN_FLIGHT_STATE_WAITING_TIME_IN_MILLIS, OWNERSHIP_MONITOR_DELAY_TIME_IN_SECS); + } + + @VisibleForTesting + public ServiceUnitStateChannelImpl(PulsarService pulsar, + long inFlightStateWaitingTimeInMillis, + long ownershipMonitorDelayTimeInSecs) { this.pulsar = pulsar; this.config = pulsar.getConfig(); - this.lookupServiceAddress = pulsar.getLookupServiceAddress(); + this.brokerId = pulsar.getBrokerId(); this.schema = Schema.JSON(ServiceUnitStateData.class); - this.getOwnerRequests = ConcurrentOpenHashMap.>newBuilder().build(); - this.cleanupJobs = ConcurrentOpenHashMap.>newBuilder().build(); + this.getOwnerRequests = new ConcurrentHashMap<>(); + this.cleanupJobs = new ConcurrentHashMap<>(); this.stateChangeListeners = new StateChangeListeners(); this.semiTerminalStateWaitingTimeInMillis = config.getLoadBalancerServiceUnitStateTombstoneDelayTimeInSeconds() * 1000; - this.inFlightStateWaitingTimeInMillis = MAX_IN_FLIGHT_STATE_WAITING_TIME_IN_MILLIS; - this.ownershipMonitorDelayTimeInSecs = OWNERSHIP_MONITOR_DELAY_TIME_IN_SECS; + this.inFlightStateWaitingTimeInMillis = inFlightStateWaitingTimeInMillis; + this.ownershipMonitorDelayTimeInSecs = ownershipMonitorDelayTimeInSecs; if (semiTerminalStateWaitingTimeInMillis < inFlightStateWaitingTimeInMillis) { throw new IllegalArgumentException( "Invalid Config: loadBalancerServiceUnitStateCleanUpDelayTimeInSeconds < " @@ -241,7 +261,7 @@ public void scheduleOwnershipMonitor() { }, 0, ownershipMonitorDelayTimeInSecs, SECONDS); log.info("This leader broker:{} started the ownership monitor.", - lookupServiceAddress); + brokerId); } } @@ -250,10 +270,15 @@ public void cancelOwnershipMonitor() { monitorTask.cancel(false); monitorTask = null; log.info("This previous leader broker:{} stopped the ownership monitor.", - lookupServiceAddress); + brokerId); } } + @Override + public void cleanOwnerships() { + doCleanup(brokerId); + } + public synchronized void start() throws PulsarServerException { if 
(!validateChannelState(LeaderElectionServiceStarted, false)) { throw new IllegalStateException("Invalid channel state:" + channelState.name()); @@ -281,10 +306,13 @@ public synchronized void start() throws PulsarServerException { } } PulsarClusterMetadataSetup.createNamespaceIfAbsent - (pulsar.getPulsarResources(), NamespaceName.SYSTEM_NAMESPACE, config.getClusterName()); + (pulsar.getPulsarResources(), SYSTEM_NAMESPACE, config.getClusterName()); + + ExtensibleLoadManagerImpl.createSystemTopic(pulsar, TOPIC); producer = pulsar.getClient().newProducer(schema) .enableBatching(true) + .compressionType(MSG_COMPRESSION_TYPE) .maxPendingMessages(MAX_OUTSTANDING_PUB_MESSAGES) .blockIfQueueFull(true) .topic(TOPIC) @@ -307,6 +335,13 @@ public synchronized void start() throws PulsarServerException { ServiceUnitStateCompactionStrategy.class.getName())) .create(); tableview.listen((key, value) -> handle(key, value)); + var strategy = (ServiceUnitStateCompactionStrategy) TopicCompactionStrategy.getInstance(TABLE_VIEW_TAG); + if (strategy == null) { + String err = TABLE_VIEW_TAG + "tag TopicCompactionStrategy is null."; + log.error(err); + throw new IllegalStateException(err); + } + strategy.setSkippedMsgHandler((key, value) -> handleSkippedEvent(key)); if (debug) { log.info("Successfully started the channel tableview."); } @@ -407,19 +442,8 @@ public CompletableFuture> getChannelOwnerAsync() { new IllegalStateException("Invalid channel state:" + channelState.name())); } - return leaderElectionService.readCurrentLeader().thenApply(leader -> { - //expecting http://broker-xyz:port - // TODO: discard this protocol prefix removal - // by a util func that returns lookupServiceAddress(serviceUrl) - if (leader.isPresent()) { - String broker = leader.get().getServiceUrl(); - broker = broker.substring(broker.lastIndexOf('/') + 1); - return Optional.of(broker); - } else { - return Optional.empty(); - } - } - ); + return leaderElectionService.readCurrentLeader() + .thenApply(leader -> leader.map(LeaderBroker::getBrokerId)); } public CompletableFuture isChannelOwnerAsync() { @@ -461,7 +485,35 @@ public boolean isOwner(String serviceUnit, String targetBroker) { } public boolean isOwner(String serviceUnit) { - return isOwner(serviceUnit, lookupServiceAddress); + return isOwner(serviceUnit, brokerId); + } + + private CompletableFuture> getActiveOwnerAsync( + String serviceUnit, + ServiceUnitState state, + Optional owner) { + return deferGetOwnerRequest(serviceUnit) + .thenCompose(newOwner -> { + if (newOwner == null) { + return CompletableFuture.completedFuture(null); + } + + return brokerRegistry.lookupAsync(newOwner) + .thenApply(lookupData -> { + if (lookupData.isPresent()) { + return newOwner; + } else { + throw new IllegalStateException( + "The new owner " + newOwner + " is inactive."); + } + }); + }).whenComplete((__, e) -> { + if (e != null) { + log.error("{} failed to get active owner broker. 
serviceUnit:{}, state:{}, owner:{}", + brokerId, serviceUnit, state, owner, e); + ownerLookUpCounters.get(state).getFailure().incrementAndGet(); + } + }).thenApply(Optional::ofNullable); } public CompletableFuture> getOwnerAsync(String serviceUnit) { @@ -475,18 +527,22 @@ public CompletableFuture> getOwnerAsync(String serviceUnit) { ownerLookUpCounters.get(state).getTotal().incrementAndGet(); switch (state) { case Owned -> { - return CompletableFuture.completedFuture(Optional.of(data.dstBroker())); + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.dstBroker())); } case Splitting -> { - return CompletableFuture.completedFuture(Optional.of(data.sourceBroker())); + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.sourceBroker())); } case Assigning, Releasing -> { - return deferGetOwnerRequest(serviceUnit).whenComplete((__, e) -> { - if (e != null) { - ownerLookUpCounters.get(state).getFailure().incrementAndGet(); - } - }).thenApply( - broker -> broker == null ? Optional.empty() : Optional.of(broker)); + if (isTargetBroker(data.dstBroker())) { + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.dstBroker())); + } + // If this broker is not the dst broker, return the dst broker as the owner(or empty). + // Clients need to connect(redirect) to the dst broker anyway + // and wait for the dst broker to receive `Owned`. + // This is also required to return getOwnerAsync on the src broker immediately during unloading. + // Otherwise, topic creation(getOwnerAsync) could block unloading bundles, + // if the topic creation(getOwnerAsync) happens during unloading on the src broker. + return CompletableFuture.completedFuture(Optional.ofNullable(data.dstBroker())); } case Init, Free -> { return CompletableFuture.completedFuture(Optional.empty()); @@ -504,6 +560,25 @@ public CompletableFuture> getOwnerAsync(String serviceUnit) { } } + private Optional getOwner(String serviceUnit) { + ServiceUnitStateData data = tableview.get(serviceUnit); + ServiceUnitState state = state(data); + switch (state) { + case Owned -> { + return Optional.of(data.dstBroker()); + } + case Splitting -> { + return Optional.of(data.sourceBroker()); + } + case Init, Free -> { + return Optional.empty(); + } + default -> { + return null; + } + } + } + private long getNextVersionId(String serviceUnit) { var data = tableview.get(serviceUnit); return getNextVersionId(data); @@ -596,9 +671,9 @@ public CompletableFuture publishSplitEventAsync(Split split) { private void handle(String serviceUnit, ServiceUnitStateData data) { long totalHandledRequests = getHandlerTotalCounter(data).incrementAndGet(); - if (log.isDebugEnabled()) { + if (debug()) { log.info("{} received a handle request for serviceUnit:{}, data:{}. 
totalHandledRequests:{}", - lookupServiceAddress, serviceUnit, data, totalHandledRequests); + brokerId, serviceUnit, data, totalHandledRequests); } ServiceUnitState state = state(data); @@ -657,12 +732,12 @@ private AtomicLong getHandlerCounter(ServiceUnitStateData data, boolean total) { private void log(Throwable e, String serviceUnit, ServiceUnitStateData data, ServiceUnitStateData next) { if (e == null) { - if (log.isDebugEnabled() || isTransferCommand(data)) { + if (debug() || isTransferCommand(data)) { long handlerTotalCount = getHandlerTotalCounter(data).get(); long handlerFailureCount = getHandlerFailureCounter(data).get(); log.info("{} handled {} event for serviceUnit:{}, cur:{}, next:{}, " + "totalHandledRequests:{}, totalFailedRequests:{}", - lookupServiceAddress, getLogEventTag(data), serviceUnit, + brokerId, getLogEventTag(data), serviceUnit, data == null ? "" : data, next == null ? "" : next, handlerTotalCount, handlerFailureCount @@ -673,7 +748,7 @@ lookupServiceAddress, getLogEventTag(data), serviceUnit, long handlerFailureCount = getHandlerFailureCounter(data).incrementAndGet(); log.error("{} failed to handle {} event for serviceUnit:{}, cur:{}, next:{}, " + "totalHandledRequests:{}, totalFailedRequests:{}", - lookupServiceAddress, getLogEventTag(data), serviceUnit, + brokerId, getLogEventTag(data), serviceUnit, data == null ? "" : data, next == null ? "" : next, handlerTotalCount, handlerFailureCount, @@ -681,15 +756,34 @@ lookupServiceAddress, getLogEventTag(data), serviceUnit, } } + private void handleSkippedEvent(String serviceUnit) { + var getOwnerRequest = getOwnerRequests.get(serviceUnit); + if (getOwnerRequest != null) { + var data = tableview.get(serviceUnit); + if (data != null && data.state() == Owned) { + getOwnerRequest.complete(data.dstBroker()); + getOwnerRequests.remove(serviceUnit); + stateChangeListeners.notify(serviceUnit, data, null); + } + } + } + private void handleOwnEvent(String serviceUnit, ServiceUnitStateData data) { var getOwnerRequest = getOwnerRequests.remove(serviceUnit); if (getOwnerRequest != null) { + if (debug()) { + log.info("Returned owner request for serviceUnit:{}", serviceUnit); + } getOwnerRequest.complete(data.dstBroker()); } stateChangeListeners.notify(serviceUnit, data, null); if (isTargetBroker(data.dstBroker())) { log(null, serviceUnit, data, null); + pulsar.getNamespaceService() + .onNamespaceBundleOwned(LoadManagerShared.getNamespaceBundle(pulsar, serviceUnit)); lastOwnEventHandledAt = System.currentTimeMillis(); + } else if (data.force() && isTargetBroker(data.sourceBroker())) { + closeServiceUnit(serviceUnit); } } @@ -731,9 +825,14 @@ private void handleFreeEvent(String serviceUnit, ServiceUnitStateData data) { if (getOwnerRequest != null) { getOwnerRequest.complete(null); } - stateChangeListeners.notify(serviceUnit, data, null); + if (isTargetBroker(data.sourceBroker())) { - log(null, serviceUnit, data, null); + stateChangeListeners.notifyOnCompletion( + data.force() ? 
closeServiceUnit(serviceUnit) + : CompletableFuture.completedFuture(0), serviceUnit, data) + .whenComplete((__, e) -> log(e, serviceUnit, data, null)); + } else { + stateChangeListeners.notify(serviceUnit, data, null); } } @@ -783,39 +882,67 @@ private boolean isTargetBroker(String broker) { if (broker == null) { return false; } - return broker.equals(lookupServiceAddress); - } - - private NamespaceBundle getNamespaceBundle(String bundle) { - final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle); - final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle); - return pulsar.getNamespaceService().getNamespaceBundleFactory().getBundle(namespaceName, bundleRange); + return broker.equals(brokerId); } private CompletableFuture deferGetOwnerRequest(String serviceUnit) { - return getOwnerRequests - .computeIfAbsent(serviceUnit, k -> { - CompletableFuture future = new CompletableFuture<>(); - future.orTimeout(inFlightStateWaitingTimeInMillis, TimeUnit.MILLISECONDS) - .whenComplete((v, e) -> { - if (e != null) { - getOwnerRequests.remove(serviceUnit, future); - log.warn("Failed to getOwner for serviceUnit:{}", - serviceUnit, e); - } - } - ); - return future; + + var requested = new MutableObject>(); + try { + return getOwnerRequests + .computeIfAbsent(serviceUnit, k -> { + var ownerBefore = getOwner(serviceUnit); + if (ownerBefore != null && ownerBefore.isPresent()) { + // Here, we do a quick active check first with the computeIfAbsent lock + brokerRegistry.lookupAsync(ownerBefore.get()).getNow(Optional.empty()) + .ifPresent(__ -> requested.setValue( + CompletableFuture.completedFuture(ownerBefore.get()))); + + if (requested.getValue() != null) { + return requested.getValue(); + } + } + + + CompletableFuture future = + new CompletableFuture().orTimeout(inFlightStateWaitingTimeInMillis, + TimeUnit.MILLISECONDS) + .exceptionally(e -> { + var ownerAfter = getOwner(serviceUnit); + log.warn("{} failed to wait for owner for serviceUnit:{}; Trying to " + + "return the current owner:{}", + brokerId, serviceUnit, ownerAfter, e); + if (ownerAfter == null) { + throw new IllegalStateException(e); + } + return ownerAfter.orElse(null); + }); + if (debug()) { + log.info("{} is waiting for owner for serviceUnit:{}", brokerId, serviceUnit); + } + requested.setValue(future); + return future; + }); + } finally { + var future = requested.getValue(); + if (future != null) { + future.whenComplete((__, e) -> { + getOwnerRequests.remove(serviceUnit); + if (e != null) { + log.warn("{} failed to getOwner for serviceUnit:{}", brokerId, serviceUnit, e); + } }); + } + } } private CompletableFuture closeServiceUnit(String serviceUnit) { long startTime = System.nanoTime(); MutableInt unloadedTopics = new MutableInt(); - NamespaceBundle bundle = getNamespaceBundle(serviceUnit); + NamespaceBundle bundle = LoadManagerShared.getNamespaceBundle(pulsar, serviceUnit); return pulsar.getBrokerService().unloadServiceUnit( bundle, - false, + true, pulsar.getConfig().getNamespaceBundleUnloadingTimeoutMs(), TimeUnit.MILLISECONDS) .thenApply(numUnloadedTopics -> { @@ -825,6 +952,7 @@ private CompletableFuture closeServiceUnit(String serviceUnit) { .whenComplete((__, ex) -> { // clean up topics that failed to unload from the broker ownership cache pulsar.getBrokerService().cleanUnloadedTopicFromCache(bundle); + pulsar.getNamespaceService().onNamespaceBundleUnload(bundle); double unloadBundleTime = TimeUnit.NANOSECONDS .toMillis((System.nanoTime() - startTime)); if (ex != null) { @@ -842,7 
+970,7 @@ private CompletableFuture splitServiceUnit(String serviceUnit, ServiceUnit long startTime = System.nanoTime(); NamespaceService namespaceService = pulsar.getNamespaceService(); NamespaceBundleFactory bundleFactory = namespaceService.getNamespaceBundleFactory(); - NamespaceBundle bundle = getNamespaceBundle(serviceUnit); + NamespaceBundle bundle = LoadManagerShared.getNamespaceBundle(pulsar, serviceUnit); CompletableFuture completionFuture = new CompletableFuture<>(); Map> bundleToDestBroker = data.splitServiceUnitToDestBroker(); List boundaries = null; @@ -1079,24 +1207,34 @@ private void handleBrokerDeletionEvent(String broker) { } private void scheduleCleanup(String broker, long delayInSecs) { - cleanupJobs.computeIfAbsent(broker, k -> { - Executor delayed = CompletableFuture - .delayedExecutor(delayInSecs, TimeUnit.SECONDS, pulsar.getLoadManagerExecutor()); - totalInactiveBrokerCleanupScheduledCnt++; - return CompletableFuture - .runAsync(() -> { - try { - doCleanup(broker); - } catch (Throwable e) { - log.error("Failed to run the cleanup job for the broker {}, " - + "totalCleanupErrorCnt:{}.", - broker, totalCleanupErrorCnt.incrementAndGet(), e); - } finally { - cleanupJobs.remove(broker); + var scheduled = new MutableObject>(); + try { + cleanupJobs.computeIfAbsent(broker, k -> { + Executor delayed = CompletableFuture + .delayedExecutor(delayInSecs, TimeUnit.SECONDS, pulsar.getLoadManagerExecutor()); + totalInactiveBrokerCleanupScheduledCnt++; + var future = CompletableFuture + .runAsync(() -> { + try { + doCleanup(broker); + } catch (Throwable e) { + log.error("Failed to run the cleanup job for the broker {}, " + + "totalCleanupErrorCnt:{}.", + broker, totalCleanupErrorCnt.incrementAndGet(), e); + } } - } - , delayed); - }); + , delayed); + scheduled.setValue(future); + return future; + }); + } finally { + var future = scheduled.getValue(); + if (future != null) { + future.whenComplete((v, ex) -> { + cleanupJobs.remove(broker); + }); + } + } log.info("Scheduled ownership cleanup for broker:{} with delay:{} secs. 
Pending clean jobs:{}.", broker, delayInSecs, cleanupJobs.size()); @@ -1104,74 +1242,131 @@ private void scheduleCleanup(String broker, long delayInSecs) { private ServiceUnitStateData getOverrideInactiveBrokerStateData(ServiceUnitStateData orphanData, - String selectedBroker) { + Optional selectedBroker, + String inactiveBroker) { + + + if (selectedBroker.isEmpty()) { + return new ServiceUnitStateData(Free, null, inactiveBroker, + true, getNextVersionId(orphanData)); + } + if (orphanData.state() == Splitting) { - return new ServiceUnitStateData(Splitting, orphanData.dstBroker(), selectedBroker, + return new ServiceUnitStateData(Splitting, orphanData.dstBroker(), selectedBroker.get(), Map.copyOf(orphanData.splitServiceUnitToDestBroker()), true, getNextVersionId(orphanData)); } else { - return new ServiceUnitStateData(Owned, selectedBroker, true, getNextVersionId(orphanData)); + return new ServiceUnitStateData(Owned, selectedBroker.get(), inactiveBroker, + true, getNextVersionId(orphanData)); } } - private void overrideOwnership(String serviceUnit, ServiceUnitStateData orphanData) { - - Optional selectedBroker = selectBroker(serviceUnit); - if (selectedBroker.isPresent()) { - var override = getOverrideInactiveBrokerStateData(orphanData, selectedBroker.get()); - log.info("Overriding ownership serviceUnit:{} from orphanData:{} to overrideData:{}", - serviceUnit, orphanData, override); - publishOverrideEventAsync(serviceUnit, orphanData, override) - .exceptionally(e -> { - log.error( - "Failed to override the ownership serviceUnit:{} orphanData:{}. " - + "Failed to publish override event. totalCleanupErrorCnt:{}", - serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); - return null; - }); - } else { - log.error("Failed to override the ownership serviceUnit:{} orphanData:{}. Empty selected broker. " + private void overrideOwnership(String serviceUnit, ServiceUnitStateData orphanData, String inactiveBroker) { + Optional selectedBroker = selectBroker(serviceUnit, inactiveBroker); + if (selectedBroker.isEmpty()) { + log.warn("Empty selected broker for ownership serviceUnit:{} orphanData:{}." + "totalCleanupErrorCnt:{}", serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); } + var override = getOverrideInactiveBrokerStateData(orphanData, selectedBroker, inactiveBroker); + log.info("Overriding ownership serviceUnit:{} from orphanData:{} to overrideData:{}", + serviceUnit, orphanData, override); + publishOverrideEventAsync(serviceUnit, orphanData, override) + .exceptionally(e -> { + log.error( + "Failed to override the ownership serviceUnit:{} orphanData:{}. " + + "Failed to publish override event. 
totalCleanupErrorCnt:{}", + serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); + return null; + }); } + private void waitForCleanups(String broker, boolean excludeSystemTopics, int maxWaitTimeInMillis) { + long started = System.currentTimeMillis(); + while (System.currentTimeMillis() - started < maxWaitTimeInMillis) { + boolean cleaned = true; + for (var etr : tableview.entrySet()) { + var serviceUnit = etr.getKey(); + var data = etr.getValue(); + + if (excludeSystemTopics && serviceUnit.startsWith(SYSTEM_NAMESPACE.toString())) { + continue; + } - private void doCleanup(String broker) { + if (data.state() == Owned && broker.equals(data.dstBroker())) { + cleaned = false; + break; + } + } + if (cleaned) { + try { + MILLISECONDS.sleep(OWNERSHIP_CLEAN_UP_CONVERGENCE_DELAY_IN_MILLIS); + } catch (InterruptedException e) { + log.warn("Interrupted while gracefully waiting for the cleanup convergence."); + } + break; + } else { + try { + MILLISECONDS.sleep(OWNERSHIP_CLEAN_UP_WAIT_RETRY_DELAY_IN_MILLIS); + } catch (InterruptedException e) { + log.warn("Interrupted while delaying the next service unit clean-up. Cleaning broker:{}", + brokerId); + } + } + } + } + + private synchronized void doCleanup(String broker) { long startTime = System.nanoTime(); log.info("Started ownership cleanup for the inactive broker:{}", broker); int orphanServiceUnitCleanupCnt = 0; long totalCleanupErrorCntStart = totalCleanupErrorCnt.get(); - + Map orphanSystemServiceUnits = new HashMap<>(); for (var etr : tableview.entrySet()) { var stateData = etr.getValue(); var serviceUnit = etr.getKey(); var state = state(stateData); - if (StringUtils.equals(broker, stateData.dstBroker())) { - if (isActiveState(state)) { - overrideOwnership(serviceUnit, stateData); - orphanServiceUnitCleanupCnt++; - } - - } else if (StringUtils.equals(broker, stateData.sourceBroker())) { - if (isInFlightState(state)) { - overrideOwnership(serviceUnit, stateData); - orphanServiceUnitCleanupCnt++; + if (StringUtils.equals(broker, stateData.dstBroker()) && isActiveState(state) + || StringUtils.equals(broker, stateData.sourceBroker()) && isInFlightState(state)) { + if (serviceUnit.startsWith(SYSTEM_NAMESPACE.toString())) { + orphanSystemServiceUnits.put(serviceUnit, stateData); + } else { + overrideOwnership(serviceUnit, stateData, broker); } + orphanServiceUnitCleanupCnt++; } } try { producer.flush(); } catch (PulsarClientException e) { - log.error("Failed to flush the in-flight messages.", e); + log.error("Failed to flush the in-flight non-system bundle override messages.", e); } + if (orphanServiceUnitCleanupCnt > 0) { + // System bundles can contain this channel's system topic and other important system topics. + // Cleaning such system bundles(closing the system topics) together with the non-system bundles + // can cause the cluster to be temporarily unstable. + // Hence, we clean the non-system bundles first and gracefully wait for them. + // After that, we clean the system bundles, if any. 
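+ // The wait below is bounded by OWNERSHIP_CLEAN_UP_MAX_WAIT_TIME_IN_MILLIS, so cleanup cannot block indefinitely on a slow override.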
+ waitForCleanups(broker, true, OWNERSHIP_CLEAN_UP_MAX_WAIT_TIME_IN_MILLIS); this.totalOrphanServiceUnitCleanupCnt += orphanServiceUnitCleanupCnt; this.totalInactiveBrokerCleanupCnt++; } + // clean system bundles in the end + for (var orphanSystemServiceUnit : orphanSystemServiceUnits.entrySet()) { + log.info("Overriding orphan system service unit:{}", orphanSystemServiceUnit.getKey()); + overrideOwnership(orphanSystemServiceUnit.getKey(), orphanSystemServiceUnit.getValue(), broker); + } + + try { + producer.flush(); + } catch (PulsarClientException e) { + log.error("Failed to flush the in-flight system bundle override messages.", e); + } + double cleanupTime = TimeUnit.NANOSECONDS .toMillis((System.nanoTime() - startTime)); @@ -1185,14 +1380,15 @@ private void doCleanup(String broker) { broker, cleanupTime, orphanServiceUnitCleanupCnt, - totalCleanupErrorCntStart - totalCleanupErrorCnt.get(), + totalCleanupErrorCnt.get() - totalCleanupErrorCntStart, printCleanupMetrics()); } - private Optional selectBroker(String serviceUnit) { + private Optional selectBroker(String serviceUnit, String inactiveBroker) { try { - return loadManager.selectAsync(getNamespaceBundle(serviceUnit)) + return loadManager.selectAsync( + LoadManagerShared.getNamespaceBundle(pulsar, serviceUnit), Set.of(inactiveBroker)) .get(inFlightStateWaitingTimeInMillis, MILLISECONDS); } catch (Throwable e) { log.error("Failed to select a broker for serviceUnit:{}", serviceUnit); @@ -1200,8 +1396,10 @@ private Optional selectBroker(String serviceUnit) { return Optional.empty(); } - private Optional getRollForwardStateData(String serviceUnit, long nextVersionId) { - Optional selectedBroker = selectBroker(serviceUnit); + private Optional getRollForwardStateData(String serviceUnit, + String inactiveBroker, + long nextVersionId) { + Optional selectedBroker = selectBroker(serviceUnit, inactiveBroker); if (selectedBroker.isEmpty()) { return Optional.empty(); } @@ -1216,7 +1414,7 @@ private Optional getOverrideInFlightStateData( var state = orphanData.state(); switch (state) { case Assigning: { - return getRollForwardStateData(serviceUnit, nextVersionId); + return getRollForwardStateData(serviceUnit, orphanData.dstBroker(), nextVersionId); } case Splitting: { return Optional.of(new ServiceUnitStateData(Splitting, @@ -1229,7 +1427,7 @@ private Optional getOverrideInFlightStateData( // rollback to the src return Optional.of(new ServiceUnitStateData(Owned, orphanData.sourceBroker(), true, nextVersionId)); } else { - return getRollForwardStateData(serviceUnit, nextVersionId); + return getRollForwardStateData(serviceUnit, orphanData.sourceBroker(), nextVersionId); } } default: { @@ -1279,16 +1477,21 @@ protected void monitorOwnerships(List brokers) { String srcBroker = stateData.sourceBroker(); var state = stateData.state(); - if (isActiveState(state)) { - if (StringUtils.isNotBlank(srcBroker) && !activeBrokers.contains(srcBroker)) { - inactiveBrokers.add(srcBroker); - } else if (StringUtils.isNotBlank(dstBroker) && !activeBrokers.contains(dstBroker)) { - inactiveBrokers.add(dstBroker); - } else if (isInFlightState(state) - && now - stateData.timestamp() > inFlightStateWaitingTimeInMillis) { - orphanServiceUnits.put(serviceUnit, stateData); - } - } else if (now - stateData.timestamp() > semiTerminalStateWaitingTimeInMillis) { + if (isActiveState(state) && StringUtils.isNotBlank(srcBroker) && !activeBrokers.contains(srcBroker)) { + inactiveBrokers.add(srcBroker); + continue; + } + if (isActiveState(state) && StringUtils.isNotBlank(dstBroker) 
&& !activeBrokers.contains(dstBroker)) { + inactiveBrokers.add(dstBroker); + continue; + } + if (isActiveState(state) && isInFlightState(state) + && now - stateData.timestamp() > inFlightStateWaitingTimeInMillis) { + orphanServiceUnits.put(serviceUnit, stateData); + continue; + } + + if (!isActiveState(state) && now - stateData.timestamp() > semiTerminalStateWaitingTimeInMillis) { log.info("Found semi-terminal states to tombstone" + " serviceUnit:{}, stateData:{}", serviceUnit, stateData); tombstoneAsync(serviceUnit).whenComplete((__, e) -> { @@ -1366,7 +1569,7 @@ protected void monitorOwnerships(List brokers) { inactiveBrokers, inactiveBrokers.size(), orphanServiceUnitCleanupCnt, serviceUnitTombstoneCleanupCnt, - totalCleanupErrorCntStart - totalCleanupErrorCnt.get(), + totalCleanupErrorCnt.get() - totalCleanupErrorCntStart, printCleanupMetrics()); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategy.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategy.java index ceb3ea3e9cb6c..6a98b79be81d0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategy.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategy.java @@ -22,6 +22,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData.state; import com.google.common.annotations.VisibleForTesting; +import java.util.function.BiConsumer; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.topics.TopicCompactionStrategy; @@ -29,6 +30,7 @@ public class ServiceUnitStateCompactionStrategy implements TopicCompactionStrategy { private final Schema schema; + private BiConsumer skippedMsgHandler; private boolean checkBrokers = true; @@ -36,6 +38,17 @@ public ServiceUnitStateCompactionStrategy() { schema = Schema.JSON(ServiceUnitStateData.class); } + public void setSkippedMsgHandler(BiConsumer skippedMsgHandler) { + this.skippedMsgHandler = skippedMsgHandler; + } + + @Override + public void handleSkippedMessage(String key, ServiceUnitStateData cur) { + if (skippedMsgHandler != null) { + skippedMsgHandler.accept(key, cur); + } + } + @Override public Schema getSchema() { return schema; @@ -52,9 +65,13 @@ public boolean shouldKeepLeft(ServiceUnitStateData from, ServiceUnitStateData to return false; } - // Skip the compaction case where from = null and to.versionId > 1 - if (from != null && from.versionId() + 1 != to.versionId()) { - return true; + if (from != null) { + if (from.versionId() == Long.MAX_VALUE && to.versionId() == Long.MIN_VALUE) { // overflow + } else if (from.versionId() >= to.versionId()) { + return true; + } else if (from.versionId() < to.versionId() - 1) { // Compacted + return false; + } // else from.versionId() == to.versionId() - 1 // continue to check further } if (to.force()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/AntiAffinityGroupPolicyFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/AntiAffinityGroupPolicyFilter.java index 462f8f0e3597a..37b35d8661dd8 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/AntiAffinityGroupPolicyFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/AntiAffinityGroupPolicyFilter.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.loadbalance.extensions.filter; import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.extensions.policies.AntiAffinityGroupPolicyHelper; @@ -38,10 +39,9 @@ public AntiAffinityGroupPolicyFilter(AntiAffinityGroupPolicyHelper helper) { } @Override - public Map filter( + public CompletableFuture> filterAsync( Map brokers, ServiceUnitId serviceUnitId, LoadManagerContext context) { - helper.filter(brokers, serviceUnitId.toString()); - return brokers; + return helper.filterAsync(brokers, serviceUnitId.toString()); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilter.java index d9cbfdc391ed4..2950a0133899c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilter.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.loadbalance.extensions.filter; import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; @@ -42,9 +43,23 @@ public interface BrokerFilter { * @param context The load manager context. * @return Filtered broker list. */ - Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) - throws BrokerFilterException; + @Deprecated + default Map filter(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) throws BrokerFilterException { + return filterAsync(brokers, serviceUnit, context).join(); + } + + /** + * Filter out async unqualified brokers based on implementation. + * + * @param brokers The full broker and lookup data. + * @param serviceUnit The current serviceUnit. + * @param context The load manager context. + * @return Filtered broker list. 
+ */ + CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilter.java index eeb0d9d3a3309..306c4c36f4880 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilter.java @@ -19,9 +19,8 @@ package org.apache.pulsar.broker.loadbalance.extensions.filter; import java.util.Map; -import java.util.Set; +import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.extensions.policies.IsolationPoliciesHelper; @@ -45,13 +44,13 @@ public String name() { } @Override - public Map filter(Map availableBrokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) - throws BrokerFilterException { - Set brokerCandidateCache = - isolationPoliciesHelper.applyIsolationPolicies(availableBrokers, serviceUnit); - availableBrokers.keySet().retainAll(brokerCandidateCache); - return availableBrokers; + public CompletableFuture> filterAsync(Map availableBrokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + return isolationPoliciesHelper.applyIsolationPoliciesAsync(availableBrokers, serviceUnit) + .thenApply(brokerCandidateCache -> { + availableBrokers.keySet().retainAll(brokerCandidateCache); + return availableBrokers; + }); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilter.java index 4ee28a5225a0d..54d2a555e6103 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilter.java @@ -19,7 +19,8 @@ package org.apache.pulsar.broker.loadbalance.extensions.filter; import java.util.Map; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.common.naming.ServiceUnitId; @@ -33,18 +34,19 @@ public String name() { } @Override - public Map filter( + public CompletableFuture> filterAsync( Map brokers, ServiceUnitId serviceUnit, - LoadManagerContext context) - throws BrokerFilterException { + LoadManagerContext context) { if (brokers.isEmpty()) { - return brokers; + return CompletableFuture.completedFuture(brokers); } brokers.entrySet().removeIf(entry -> { BrokerLookupData v = entry.getValue(); - return !v.getLoadManagerClassName().equals(context.brokerConfiguration().getLoadManagerClassName()); + // The load manager class name can be null if the cluster has old version of broker. 
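The BrokerFilter change above is a standard sync-to-async migration: filterAsync becomes the primary contract, and the old blocking filter survives only as a deprecated default that joins the returned future, so existing callers keep compiling while new call sites stay non-blocking. A minimal self-contained sketch of the pattern, using hypothetical names rather than the real Pulsar types:

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;

    interface CandidateFilter<K, V> {

        // New primary contract: filter the candidate map without blocking the caller.
        CompletableFuture<Map<K, V>> filterAsync(Map<K, V> candidates);

        // Legacy entry point kept for source compatibility; it simply blocks on the async path.
        @Deprecated
        default Map<K, V> filter(Map<K, V> candidates) {
            return filterAsync(candidates).join();
        }
    }

    // Implementations only provide the async method; trivial filters mutate the map in place
    // and return an already-completed future.
    class NonEmptyValueFilter implements CandidateFilter<String, String> {
        @Override
        public CompletableFuture<Map<String, String>> filterAsync(Map<String, String> candidates) {
            candidates.values().removeIf(String::isEmpty);
            return CompletableFuture.completedFuture(candidates);
        }
    }

On the async path, failures are reported by returning a failed future (as BrokerVersionFilter now does via FutureUtil.failedFuture) instead of throwing the checked BrokerFilterException.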
+ return !Objects.equals(v.getLoadManagerClassName(), + context.brokerConfiguration().getLoadManagerClassName()); }); - return brokers; + return CompletableFuture.completedFuture(brokers); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilter.java index 0bceae36bb8c2..472cabf156636 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilter.java @@ -20,7 +20,7 @@ import java.util.Map; import java.util.Optional; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; +import java.util.concurrent.CompletableFuture; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLoadData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; @@ -36,9 +36,9 @@ public String name() { } @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { int loadBalancerBrokerMaxTopics = context.brokerConfiguration().getLoadBalancerBrokerMaxTopics(); brokers.keySet().removeIf(broker -> { Optional brokerLoadDataOpt = context.brokerLoadDataStore().get(broker); @@ -46,6 +46,6 @@ public Map filter(Map broker // TODO: The broker load data might be delayed, so the max topic check might not accurate. return topics >= loadBalancerBrokerMaxTopics; }); - return brokers; + return CompletableFuture.completedFuture(brokers); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilter.java index 7420fcc211309..1af39a6adb5fe 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilter.java @@ -21,13 +21,14 @@ import com.github.zafarkhaja.semver.Version; import java.util.Iterator; import java.util.Map; +import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.BrokerFilterBadVersionException; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.common.naming.ServiceUnitId; +import org.apache.pulsar.common.util.FutureUtil; /** * Filter by broker version. 
@@ -46,13 +47,12 @@ public class BrokerVersionFilter implements BrokerFilter { * */ @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) - throws BrokerFilterException { + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { ServiceConfiguration conf = context.brokerConfiguration(); if (!conf.isPreferLaterVersions() || brokers.isEmpty()) { - return brokers; + return CompletableFuture.completedFuture(brokers); } Version latestVersion; @@ -63,7 +63,8 @@ public Map filter(Map broker } } catch (Exception ex) { log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage()); - throw new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage()); + return FutureUtil.failedFuture( + new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage())); } int numBrokersLatestVersion = 0; @@ -88,7 +89,7 @@ public Map filter(Map broker if (numBrokersOlderVersion == 0) { log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion); } - return brokers; + return CompletableFuture.completedFuture(brokers); } /** diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManager.java index 4aff77937a5b4..3455b333b0ae7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManager.java @@ -19,9 +19,11 @@ package org.apache.pulsar.broker.loadbalance.extensions.manager; import static org.apache.pulsar.broker.loadbalance.LoadManager.LOADBALANCE_BROKERS_ROOT; +import com.google.common.annotations.VisibleForTesting; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -48,6 +50,12 @@ public RedirectManager(PulsarService pulsar) { this.brokerLookupDataLockManager = pulsar.getCoordinationService().getLockManager(BrokerLookupData.class); } + @VisibleForTesting + public RedirectManager(PulsarService pulsar, LockManager brokerLookupDataLockManager) { + this.pulsar = pulsar; + this.brokerLookupDataLockManager = brokerLookupDataLockManager; + } + public CompletableFuture> getAvailableBrokerLookupDataAsync() { return brokerLookupDataLockManager.listLocks(LOADBALANCE_BROKERS_ROOT).thenCompose(availableBrokers -> { Map map = new ConcurrentHashMap<>(); @@ -69,7 +77,7 @@ public CompletableFuture> getAvailableBrokerLookup public CompletableFuture> findRedirectLookupResultAsync() { String currentLMClassName = pulsar.getConfiguration().getLoadManagerClassName(); - boolean debug = ExtensibleLoadManagerImpl.debug(pulsar.getConfig(), log); + boolean debug = ExtensibleLoadManagerImpl.debug(pulsar.getConfiguration(), log); return getAvailableBrokerLookupDataAsync().thenApply(lookupDataMap -> { if (lookupDataMap.isEmpty()) { String errorMsg = "No available broker found."; @@ -89,9 +97,10 @@ public CompletableFuture> findRedirectLookupResultAsync() log.warn(errorMsg); throw new IllegalStateException(errorMsg); } - if (latestServiceLookupData.get().getLoadManagerClassName().equals(currentLMClassName)) { + + if 
(Objects.equals(latestServiceLookupData.get().getLoadManagerClassName(), currentLMClassName)) { if (debug) { - log.info("We don't need to redirect, current load manager class name: {}", + log.info("No need to redirect, current load manager class name: {}", currentLMClassName); } return Optional.empty(); @@ -99,7 +108,7 @@ public CompletableFuture> findRedirectLookupResultAsync() var serviceLookupDataObj = latestServiceLookupData.get(); var candidateBrokers = new ArrayList(); lookupDataMap.forEach((key, value) -> { - if (value.getLoadManagerClassName().equals(serviceLookupDataObj.getLoadManagerClassName())) { + if (Objects.equals(value.getLoadManagerClassName(), serviceLookupDataObj.getLoadManagerClassName())) { candidateBrokers.add(value); } }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java index 2dde0c4708e41..ffdbbc2af4219 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java @@ -88,6 +88,13 @@ public CompletableFuture waitAsync(CompletableFuture eventPubFuture, @Override public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable t) { + if (t != null && inFlightUnloadRequest.containsKey(serviceUnit)) { + if (log.isDebugEnabled()) { + log.debug("Handling {} for service unit {} with exception.", data, serviceUnit, t); + } + this.complete(serviceUnit, t); + return; + } ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Free, Owned -> this.complete(serviceUnit, t); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java index 2f5c32197c1fd..624546fdff837 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java @@ -30,6 +30,8 @@ import org.apache.pulsar.broker.loadbalance.extensions.data.TopBundlesLoadData; import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceAllocationPolicies; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; @@ -70,7 +72,8 @@ public void update(Map bundleStats, int topk) { pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled(); for (var etr : bundleStats.entrySet()) { String bundle = etr.getKey(); - if (bundle.startsWith(NamespaceName.SYSTEM_NAMESPACE.toString())) { + // TODO: do not filter system topic while shedding + if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) { continue; } if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/AntiAffinityGroupPolicyHelper.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/AntiAffinityGroupPolicyHelper.java index 44360bc77d83f..3781c9c95f6a7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/AntiAffinityGroupPolicyHelper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/AntiAffinityGroupPolicyHelper.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannel; @@ -41,11 +42,11 @@ public AntiAffinityGroupPolicyHelper(PulsarService pulsar, this.channel = channel; } - public void filter( - Map brokers, String bundle) { - LoadManagerShared.filterAntiAffinityGroupOwnedBrokers(pulsar, bundle, - brokers.keySet(), - channel.getOwnershipEntrySet(), brokerToFailureDomainMap); + public CompletableFuture> filterAsync(Map brokers, + String bundle) { + return LoadManagerShared.filterAntiAffinityGroupOwnedBrokersAsync(pulsar, bundle, + brokers.keySet(), channel.getOwnershipEntrySet(), brokerToFailureDomainMap) + .thenApply(__ -> brokers); } public boolean hasAntiAffinityGroupPolicy(String bundle) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java index 67dc702cc0c9f..56238d6528e60 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java @@ -18,10 +18,9 @@ */ package org.apache.pulsar.broker.loadbalance.extensions.policies; -import io.netty.util.concurrent.FastThreadLocal; -import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; @@ -38,32 +37,22 @@ public IsolationPoliciesHelper(SimpleResourceAllocationPolicies policies) { this.policies = policies; } - private static final FastThreadLocal> localBrokerCandidateCache = new FastThreadLocal<>() { - @Override - protected Set initialValue() { - return new HashSet<>(); - } - }; - - public Set applyIsolationPolicies(Map availableBrokers, - ServiceUnitId serviceUnit) { - Set brokerCandidateCache = localBrokerCandidateCache.get(); - brokerCandidateCache.clear(); - LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, + public CompletableFuture> applyIsolationPoliciesAsync(Map availableBrokers, + ServiceUnitId serviceUnit) { + return LoadManagerShared.applyNamespacePoliciesAsync(serviceUnit, policies, availableBrokers.keySet(), new LoadManagerShared.BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { - BrokerLookupData lookupData = availableBrokers.get(brokerUrl.replace("http://", "")); + public boolean isEnablePersistentTopics(String brokerId) { + BrokerLookupData lookupData = availableBrokers.get(brokerId); return lookupData != null && lookupData.persistentTopicsEnabled(); } @Override - public boolean isEnableNonPersistentTopics(String 
brokerUrl) { - BrokerLookupData lookupData = availableBrokers.get(brokerUrl.replace("http://", "")); + public boolean isEnableNonPersistentTopics(String brokerId) { + BrokerLookupData lookupData = availableBrokers.get(brokerId); return lookupData != null && lookupData.nonPersistentTopicsEnabled(); } }); - return brokerCandidateCache; } public boolean hasIsolationPolicy(NamespaceName namespaceName) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/BrokerLoadDataReporter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/BrokerLoadDataReporter.java index b07acfda7f77d..3061969120bb3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/BrokerLoadDataReporter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/BrokerLoadDataReporter.java @@ -55,7 +55,7 @@ public class BrokerLoadDataReporter implements LoadDataReporter, private final BrokerHostUsage brokerHostUsage; - private final String lookupServiceAddress; + private final String brokerId; @Getter private final BrokerLoadData localData; @@ -67,10 +67,10 @@ public class BrokerLoadDataReporter implements LoadDataReporter, private long tombstoneDelayInMillis; public BrokerLoadDataReporter(PulsarService pulsar, - String lookupServiceAddress, + String brokerId, LoadDataStore brokerLoadDataStore) { this.brokerLoadDataStore = brokerLoadDataStore; - this.lookupServiceAddress = lookupServiceAddress; + this.brokerId = brokerId; this.pulsar = pulsar; this.conf = this.pulsar.getConfiguration(); if (SystemUtils.IS_OS_LINUX) { @@ -111,7 +111,7 @@ public CompletableFuture reportAsync(boolean force) { log.info("publishing load report:{}", localData.toString(conf)); } CompletableFuture future = - this.brokerLoadDataStore.pushAsync(this.lookupServiceAddress, newLoadData); + this.brokerLoadDataStore.pushAsync(this.brokerId, newLoadData); future.whenComplete((__, ex) -> { if (ex == null) { localData.setReportedAt(System.currentTimeMillis()); @@ -185,7 +185,7 @@ protected void tombstone() { } var lastSuccessfulTombstonedAt = lastTombstonedAt; lastTombstonedAt = now; // dedup first - brokerLoadDataStore.removeAsync(lookupServiceAddress) + brokerLoadDataStore.removeAsync(brokerId) .whenComplete((__, e) -> { if (e != null) { log.error("Failed to clean broker load data.", e); @@ -209,13 +209,13 @@ public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Releasing, Splitting -> { - if (StringUtils.equals(data.sourceBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.sourceBroker(), brokerId)) { localData.clear(); tombstone(); } } case Owned -> { - if (StringUtils.equals(data.dstBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.dstBroker(), brokerId)) { localData.clear(); tombstone(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java index 0fa37d3687c20..43e05ad1ac972 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java @@ -41,7 +41,7 @@ public class 
TopBundleLoadDataReporter implements LoadDataReporter bundleLoadDataStore; @@ -53,10 +53,10 @@ public class TopBundleLoadDataReporter implements LoadDataReporter bundleLoadDataStore) { this.pulsar = pulsar; - this.lookupServiceAddress = lookupServiceAddress; + this.brokerId = brokerId; this.bundleLoadDataStore = bundleLoadDataStore; this.lastBundleStatsUpdatedAt = 0; this.topKBundles = new TopKBundles(pulsar); @@ -88,7 +88,7 @@ public CompletableFuture reportAsync(boolean force) { if (ExtensibleLoadManagerImpl.debug(pulsar.getConfiguration(), log)) { log.info("Reporting TopBundlesLoadData:{}", topKBundles.getLoadData()); } - return this.bundleLoadDataStore.pushAsync(lookupServiceAddress, topKBundles.getLoadData()) + return this.bundleLoadDataStore.pushAsync(brokerId, topKBundles.getLoadData()) .exceptionally(e -> { log.error("Failed to report top-bundles load data.", e); return null; @@ -106,7 +106,7 @@ protected void tombstone() { } var lastSuccessfulTombstonedAt = lastTombstonedAt; lastTombstonedAt = now; // dedup first - bundleLoadDataStore.removeAsync(lookupServiceAddress) + bundleLoadDataStore.removeAsync(brokerId) .whenComplete((__, e) -> { if (e != null) { log.error("Failed to clean broker load data.", e); @@ -129,12 +129,12 @@ public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Releasing, Splitting -> { - if (StringUtils.equals(data.sourceBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.sourceBroker(), brokerId)) { tombstone(); } } case Owned -> { - if (StringUtils.equals(data.dstBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.dstBroker(), brokerId)) { tombstone(); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedder.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedder.java index 07d521a28afa7..cd5c17febc226 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedder.java @@ -47,7 +47,6 @@ import lombok.experimental.Accessors; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannel; @@ -720,8 +719,10 @@ private boolean isTransferable(LoadManagerContext context, Map candidates = new HashMap<>(availableBrokers); for (var filter : brokerFilterPipeline) { try { - filter.filter(candidates, namespaceBundle, context); - } catch (BrokerFilterException e) { + filter.filterAsync(candidates, namespaceBundle, context) + .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), + TimeUnit.SECONDS); + } catch (InterruptedException | ExecutionException | TimeoutException e) { log.error("Failed to filter brokers with filter: {}", filter.getClass().getName(), e); return false; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java 
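isTransferable above is a synchronous consumer of the new async filters, so it bounds each filterAsync call with the metadata-store operation timeout and treats InterruptedException, ExecutionException and TimeoutException alike as a veto on the transfer. A reduced sketch of that call shape, with the filters abstracted as future suppliers and all names illustrative:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    class FilterGate {

        // Runs each async filter under a hard timeout; any failure means the candidate set
        // cannot be trusted, so the caller gives up instead of propagating the exception.
        static boolean allFiltersPassed(List<Supplier<CompletableFuture<?>>> filters, long timeoutSeconds) {
            for (Supplier<CompletableFuture<?>> filter : filters) {
                try {
                    filter.get().get(timeoutSeconds, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return false;
                } catch (ExecutionException | TimeoutException e) {
                    return false;
                }
            }
            return true;
        }
    }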
index d6c754c90fcf6..218f57932a56b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java @@ -219,9 +219,8 @@ private static NamespaceUnloadStrategy createNamespaceUnloadStrategy(PulsarServi Thread.currentThread().getContextClassLoader()); log.info("Created namespace unload strategy:{}", unloadStrategy.getClass().getCanonicalName()); } catch (Exception e) { - log.error("Error when trying to create namespace unload strategy: {}", - conf.getLoadBalancerLoadPlacementStrategy(), e); - log.error("create namespace unload strategy failed. using TransferShedder instead."); + log.error("Error when trying to create namespace unload strategy: {}. Using {} instead.", + conf.getLoadBalancerLoadSheddingStrategy(), TransferShedder.class.getCanonicalName(), e); unloadStrategy = new TransferShedder(); } unloadStrategy.initialize(pulsar); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java index 680a36523a214..a7deeeaad8a5c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java @@ -81,9 +81,26 @@ public interface LoadDataStore extends Closeable { */ void closeTableView() throws IOException; + + /** + * Starts the data store (both producer and table view). + */ + void start() throws LoadDataStoreException; + + /** + * Inits the data store (close and start the data store). + */ + void init() throws IOException; + /** * Starts the table view. */ void startTableView() throws LoadDataStoreException; + + /** + * Starts the producer. + */ + void startProducer() throws LoadDataStoreException; + } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java index 18f39abd76b76..bcb2657c67f05 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java @@ -18,15 +18,16 @@ */ package org.apache.pulsar.broker.loadbalance.extensions.store; -import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.broker.PulsarService; /** * The load data store factory, use to create the load data store. 
*/ public class LoadDataStoreFactory { - public static LoadDataStore create(PulsarClient client, String name, Class clazz) + public static LoadDataStore create(PulsarService pulsar, String name, + Class clazz) throws LoadDataStoreException { - return new TableViewLoadDataStoreImpl<>(client, name, clazz); + return new TableViewLoadDataStoreImpl<>(pulsar, name, clazz); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java index a400163ebf122..d916e91716223 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java @@ -23,7 +23,12 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; @@ -35,11 +40,17 @@ * * @param Load data type. */ +@Slf4j public class TableViewLoadDataStoreImpl implements LoadDataStore { - private TableView tableView; + private static final long LOAD_DATA_REPORT_UPDATE_MAX_INTERVAL_MULTIPLIER_BEFORE_RESTART = 2; - private final Producer producer; + private volatile TableView tableView; + private volatile long tableViewLastUpdateTimestamp; + + private volatile Producer producer; + + private final ServiceConfiguration conf; private final PulsarClient client; @@ -47,10 +58,11 @@ public class TableViewLoadDataStoreImpl implements LoadDataStore { private final Class clazz; - public TableViewLoadDataStoreImpl(PulsarClient client, String topic, Class clazz) throws LoadDataStoreException { + public TableViewLoadDataStoreImpl(PulsarService pulsar, String topic, Class clazz) + throws LoadDataStoreException { try { - this.client = client; - this.producer = client.newProducer(Schema.JSON(clazz)).topic(topic).create(); + this.conf = pulsar.getConfiguration(); + this.client = pulsar.getClient(); this.topic = topic; this.clazz = clazz; } catch (Exception e) { @@ -59,40 +71,42 @@ public TableViewLoadDataStoreImpl(PulsarClient client, String topic, Class cl } @Override - public CompletableFuture pushAsync(String key, T loadData) { + public synchronized CompletableFuture pushAsync(String key, T loadData) { + validateProducer(); return producer.newMessage().key(key).value(loadData).sendAsync().thenAccept(__ -> {}); } @Override - public CompletableFuture removeAsync(String key) { + public synchronized CompletableFuture removeAsync(String key) { + validateProducer(); return producer.newMessage().key(key).value(null).sendAsync().thenAccept(__ -> {}); } @Override - public Optional get(String key) { - validateTableViewStart(); + public synchronized Optional get(String key) { + validateTableView(); return Optional.ofNullable(tableView.get(key)); } @Override - public void forEach(BiConsumer action) { - validateTableViewStart(); + public synchronized void forEach(BiConsumer action) { + validateTableView(); tableView.forEach(action); } - public Set> entrySet() { - 
validateTableViewStart(); + public synchronized Set> entrySet() { + validateTableView(); return tableView.entrySet(); } @Override - public int size() { - validateTableViewStart(); + public synchronized int size() { + validateTableView(); return tableView.size(); } @Override - public void closeTableView() throws IOException { + public synchronized void closeTableView() throws IOException { if (tableView != null) { tableView.close(); tableView = null; @@ -100,10 +114,18 @@ public void closeTableView() throws IOException { } @Override - public void startTableView() throws LoadDataStoreException { + public synchronized void start() throws LoadDataStoreException { + startProducer(); + startTableView(); + } + + @Override + public synchronized void startTableView() throws LoadDataStoreException { if (tableView == null) { try { tableView = client.newTableViewBuilder(Schema.JSON(clazz)).topic(topic).create(); + tableView.forEachAndListen((k, v) -> + tableViewLastUpdateTimestamp = System.currentTimeMillis()); } catch (PulsarClientException e) { tableView = null; throw new LoadDataStoreException(e); @@ -112,17 +134,74 @@ public void startTableView() throws LoadDataStoreException { } @Override - public void close() throws IOException { + public synchronized void startProducer() throws LoadDataStoreException { + if (producer == null) { + try { + producer = client.newProducer(Schema.JSON(clazz)).topic(topic).create(); + } catch (PulsarClientException e) { + producer = null; + throw new LoadDataStoreException(e); + } + } + } + + @Override + public synchronized void close() throws IOException { if (producer != null) { producer.close(); + producer = null; } closeTableView(); } - private void validateTableViewStart() { - if (tableView == null) { - throw new IllegalStateException("table view has not been started"); + @Override + public synchronized void init() throws IOException { + close(); + start(); + } + + private void validateProducer() { + if (producer == null || !producer.isConnected()) { + try { + if (producer != null) { + producer.close(); + } + producer = null; + startProducer(); + log.info("Restarted producer on {}", topic); + } catch (Exception e) { + log.error("Failed to restart producer on {}", topic, e); + throw new RuntimeException(e); + } } } + private void validateTableView() { + String restartReason = null; + + if (tableView == null) { + restartReason = "table view is null"; + } else { + long inactiveDuration = System.currentTimeMillis() - tableViewLastUpdateTimestamp; + long threshold = TimeUnit.MINUTES.toMillis(conf.getLoadBalancerReportUpdateMaxIntervalMinutes()) + * LOAD_DATA_REPORT_UPDATE_MAX_INTERVAL_MULTIPLIER_BEFORE_RESTART; + if (inactiveDuration > threshold) { + restartReason = String.format("inactiveDuration=%d secs > threshold = %d secs", + TimeUnit.MILLISECONDS.toSeconds(inactiveDuration), + TimeUnit.MILLISECONDS.toSeconds(threshold)); + } + } + + if (StringUtils.isNotBlank(restartReason)) { + tableViewLastUpdateTimestamp = 0; + try { + closeTableView(); + startTableView(); + log.info("Restarted tableview on {}, {}", topic, restartReason); + } catch (Exception e) { + log.error("Failed to restart tableview on {}", topic, e); + throw new RuntimeException(e); + } + } + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeight.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeight.java index 902cfdaf73f5f..98986d84b9858 100644 --- 
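TableViewLoadDataStoreImpl now lazily re-creates its producer whenever it is missing or disconnected, and restarts the table view when no update has been observed for longer than twice the load-report max interval. The sketch below isolates just that staleness watchdog; the class, field, and parameter names are illustrative rather than the broker's configuration keys.

    import java.util.concurrent.TimeUnit;

    // Staleness guard: every read first checks how long ago the view delivered an update
    // and rebuilds the view if that exceeds the configured threshold.
    class StaleViewGuard {

        private volatile long lastUpdateMillis;
        private final long thresholdMillis;

        StaleViewGuard(long reportIntervalMinutes, long multiplier) {
            this.thresholdMillis = TimeUnit.MINUTES.toMillis(reportIntervalMinutes) * multiplier;
            this.lastUpdateMillis = System.currentTimeMillis();
        }

        // Wire this into the view's listener, e.g. tableView.forEachAndListen((k, v) -> guard.onUpdate()).
        void onUpdate() {
            lastUpdateMillis = System.currentTimeMillis();
        }

        synchronized void ensureFresh(Runnable restartView) {
            long inactive = System.currentTimeMillis() - lastUpdateMillis;
            if (inactive > thresholdMillis) {
                restartView.run();                             // close and re-create the underlying view
                lastUpdateMillis = System.currentTimeMillis(); // give the fresh view a full interval
            }
        }
    }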
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeight.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeight.java @@ -23,6 +23,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; +import javax.annotation.concurrent.ThreadSafe; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.extensions.LoadManagerContext; @@ -35,14 +36,15 @@ * cause cluster fluctuations due to short-term load jitter. */ @Slf4j +@ThreadSafe public class LeastResourceUsageWithWeight implements BrokerSelectionStrategy { // Maintain this list to reduce object creation. - private final ArrayList bestBrokers; - private final Set noLoadDataBrokers; + private final ThreadLocal> bestBrokers; + private final ThreadLocal> noLoadDataBrokers; public LeastResourceUsageWithWeight() { - this.bestBrokers = new ArrayList<>(); - this.noLoadDataBrokers = new HashSet<>(); + this.bestBrokers = ThreadLocal.withInitial(ArrayList::new); + this.noLoadDataBrokers = ThreadLocal.withInitial(HashSet::new); } // A broker's max resource usage with weight using its historical load and short-term load data with weight. @@ -70,7 +72,6 @@ private double getMaxResourceUsageWithWeight(final String broker, final BrokerLo /** * Find a suitable broker to assign the given bundle to. - * This method is not thread safety. * * @param candidates The candidates for which the bundle may be assigned. * @param bundleToAssign The data for the bundle to assign. @@ -86,6 +87,9 @@ public Optional select( return Optional.empty(); } + ArrayList bestBrokers = this.bestBrokers.get(); + HashSet noLoadDataBrokers = this.noLoadDataBrokers.get(); + bestBrokers.clear(); noLoadDataBrokers.clear(); // Maintain of list of all the best scoring brokers and then randomly @@ -135,9 +139,7 @@ public Optional select( log.info("Assign randomly as none of the brokers are underloaded. candidatesSize:{}, " + "noLoadDataBrokersSize:{}", candidates.size(), noLoadDataBrokers.size()); } - for (String broker : candidates) { - bestBrokers.add(broker); - } + bestBrokers.addAll(candidates); } if (debugMode) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilter.java index 5d6a56ba86960..13e3fdc537e79 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilter.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import java.util.Objects; import java.util.Set; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.BrokerFilter; @@ -32,8 +33,8 @@ public void filter(Set brokers, BundleData bundleToAssign, LoadData loadData, ServiceConfiguration conf) throws BrokerFilterException { loadData.getBrokerData().forEach((key, value) -> { - if (!value.getLocalData().getLoadManagerClassName() - .equals(conf.getLoadManagerClassName())) { + // The load manager class name can be null if the cluster has old version of broker. 
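Marking LeastResourceUsageWithWeight as @ThreadSafe works because the mutable scratch collections move from plain instance fields to ThreadLocal buffers that are fetched and cleared at the start of each call, so concurrent select() invocations no longer share state while the original allocation-avoidance goal is kept. A generic sketch of that pattern (names are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.ThreadLocalRandom;

    class PerThreadScratchSelector {

        // Each thread reuses its own buffer instead of sharing a single instance field.
        private final ThreadLocal<ArrayList<String>> bestCandidates =
                ThreadLocal.withInitial(ArrayList::new);

        Optional<String> select(List<String> candidates) {
            ArrayList<String> best = bestCandidates.get();
            best.clear();                       // reuse the buffer, do not reallocate per call
            best.addAll(candidates);            // a real strategy would score and trim here
            if (best.isEmpty()) {
                return Optional.empty();
            }
            return Optional.of(best.get(ThreadLocalRandom.current().nextInt(best.size())));
        }
    }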
+ if (!Objects.equals(value.getLocalData().getLoadManagerClassName(), conf.getLoadManagerClassName())) { brokers.remove(key); } }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java index 318f37f7f7a97..6d0e6bb907346 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java @@ -140,7 +140,7 @@ private double getTotalCpuUsage(double elapsedTimeSeconds) { } private double getTotalCpuUsageForCGroup(double elapsedTimeSeconds) { - double usage = getCpuUsageForCGroup(); + double usage = (double) getCpuUsageForCGroup(); double currentUsage = usage - lastCpuUsage; lastCpuUsage = usage; return 100 * currentUsage / elapsedTimeSeconds / TimeUnit.SECONDS.toNanos(1); @@ -155,7 +155,8 @@ private double getTotalCpuUsageForCGroup(double elapsedTimeSeconds) { * * * Line is split in "words", filtering the first. The sum of all numbers give the amount of cpu cycles used this - * far. Real CPU usage should equal the sum substracting the idle cycles, this would include iowait, irq and steal. + * far. Real CPU usage should equal the sum substracting the idle cycles(that is idle+iowait), this would include + * cpu, user, nice, system, irq, softirq, steal, guest and guest_nice. */ private double getTotalCpuUsageForEntireHost() { LinuxInfoUtils.ResourceUsage cpuUsageForEntireHost = getCpuUsageForEntireHost(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java index 6818ae03b5280..3d627db6cfa9e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java @@ -21,8 +21,6 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.stats.JvmMetrics.getJvmDirectMemoryUsed; import io.netty.util.concurrent.FastThreadLocal; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -106,59 +104,56 @@ public static void applyNamespacePolicies(final ServiceUnitId serviceUnit, if (isIsolationPoliciesPresent) { LOG.debug("Isolation Policies Present for namespace - [{}]", namespace.toString()); } - for (final String broker : availableBrokers) { - final String brokerUrlString = String.format("http://%s", broker); - URL brokerUrl; + for (final String brokerId : availableBrokers) { + String brokerHost; try { - brokerUrl = new URL(brokerUrlString); - } catch (MalformedURLException e) { - LOG.error("Unable to parse brokerUrl from ResourceUnitId", e); + brokerHost = parseBrokerHost(brokerId); + } catch (IllegalArgumentException e) { + LOG.error("Unable to parse host from {}", brokerId, e); continue; } // todo: in future check if the resource unit has resources to take the namespace if (isIsolationPoliciesPresent) { // note: serviceUnitID is namespace name and ResourceID is brokerName - if (policies.isPrimaryBroker(namespace, brokerUrl.getHost())) { - primariesCache.add(broker); + if (policies.isPrimaryBroker(namespace, brokerHost)) { + primariesCache.add(brokerId); if (LOG.isDebugEnabled()) { 
LOG.debug("Added Primary Broker - [{}] as possible Candidates for" - + " namespace - [{}] with policies", brokerUrl.getHost(), namespace.toString()); + + " namespace - [{}] with policies", brokerHost, namespace.toString()); } - } else if (policies.isSecondaryBroker(namespace, brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSecondaryBroker(namespace, brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug( "Added Shared Broker - [{}] as possible " + "Candidates for namespace - [{}] with policies", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Skipping Broker - [{}] not primary broker and not shared" + " for namespace - [{}] ", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } } else { // non-persistent topic can be assigned to only those brokers that enabled for non-persistent topic - if (isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerUrlString)) { + if (isNonPersistentTopic && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerId)) { if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because it doesn't support non-persistent namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } - } else if (!isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerUrlString)) { + } else if (!isNonPersistentTopic && !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerId)) { // persistent topic can be assigned to only brokers that enabled for persistent-topic if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because broker only supports non-persistent namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } - } else if (policies.isSharedBroker(brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSharedBroker(brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug("Added Shared Broker - [{}] as possible Candidates for namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } } @@ -181,6 +176,112 @@ public static void applyNamespacePolicies(final ServiceUnitId serviceUnit, } } + private static String parseBrokerHost(String brokerId) { + // use last index to support ipv6 addresses + int lastIdx = brokerId.lastIndexOf(':'); + if (lastIdx > -1) { + return brokerId.substring(0, lastIdx); + } else { + throw new IllegalArgumentException("Invalid brokerId: " + brokerId); + } + } + + public static CompletableFuture> applyNamespacePoliciesAsync( + final ServiceUnitId serviceUnit, final SimpleResourceAllocationPolicies policies, + final Set availableBrokers, final BrokerTopicLoadingPredicate brokerTopicLoadingPredicate) { + NamespaceName namespace = serviceUnit.getNamespaceObject(); + return policies.areIsolationPoliciesPresentAsync(namespace).thenApply(isIsolationPoliciesPresent -> { + final Set brokerCandidateCache = new HashSet<>(); + Set primariesCache = localPrimariesCache.get(); + primariesCache.clear(); + + Set secondaryCache = localSecondaryCache.get(); + secondaryCache.clear(); + boolean isNonPersistentTopic = (serviceUnit instanceof NamespaceBundle) + ? 
((NamespaceBundle) serviceUnit).hasNonPersistentTopic() : false; + if (isIsolationPoliciesPresent) { + if (LOG.isDebugEnabled()) { + LOG.debug("Isolation Policies Present for namespace - [{}]", namespace.toString()); + } + } + for (final String brokerId : availableBrokers) { + String brokerHost; + try { + brokerHost = parseBrokerHost(brokerId); + } catch (IllegalArgumentException e) { + LOG.error("Unable to parse host from {}", brokerId, e); + continue; + } + // todo: in future check if the resource unit has resources to take the namespace + if (isIsolationPoliciesPresent) { + // note: serviceUnitID is namespace name and ResourceID is brokerName + if (policies.isPrimaryBroker(namespace, brokerHost)) { + primariesCache.add(brokerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Added Primary Broker - [{}] as possible Candidates for" + + " namespace - [{}] with policies", brokerHost, namespace.toString()); + } + } else if (policies.isSecondaryBroker(namespace, brokerHost)) { + secondaryCache.add(brokerId); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Added Shared Broker - [{}] as possible " + + "Candidates for namespace - [{}] with policies", + brokerHost, namespace.toString()); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Skipping Broker - [{}] not primary broker and not shared" + + " for namespace - [{}] ", brokerHost, namespace.toString()); + } + + } + } else { + // non-persistent topic can be assigned to only those brokers that enabled for non-persistent topic + if (isNonPersistentTopic && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerId)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Filter broker- [{}] because it doesn't support non-persistent namespace - [{}]", + brokerId, namespace.toString()); + } + } else if (!isNonPersistentTopic && !brokerTopicLoadingPredicate + .isEnablePersistentTopics(brokerId)) { + // persistent topic can be assigned to only brokers that enabled for persistent-topic + if (LOG.isDebugEnabled()) { + LOG.debug("Filter broker- [{}] because broker only supports non-persistent " + + "namespace - [{}]", brokerId, namespace.toString()); + } + } else if (policies.isSharedBroker(brokerHost)) { + secondaryCache.add(brokerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Added Shared Broker - [{}] as possible Candidates for namespace - [{}]", + brokerHost, namespace.toString()); + } + } + } + } + if (isIsolationPoliciesPresent) { + brokerCandidateCache.addAll(primariesCache); + if (policies.shouldFailoverToSecondaries(namespace, primariesCache.size())) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "Not enough of primaries [{}] available for namespace - [{}], " + + "adding shared [{}] as possible candidate owners", + primariesCache.size(), namespace.toString(), secondaryCache.size()); + } + brokerCandidateCache.addAll(secondaryCache); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug( + "Policies not present for namespace - [{}] so only " + + "considering shared [{}] brokers for possible owner", + namespace.toString(), secondaryCache.size()); + } + brokerCandidateCache.addAll(secondaryCache); + } + return brokerCandidateCache; + }); + } /** * Using the given bundles, populate the namespace to bundle range map. 
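applyNamespacePolicies and its new async variant no longer build a throwaway http:// URL per broker; the host is cut directly out of the brokerId ("host:port") with lastIndexOf(':'), which leaves bracketed IPv6 literals intact. A standalone copy of that splitting rule with a usage example (class and method names here are illustrative):

    class BrokerHost {

        // Same rule as parseBrokerHost above: cut at the last ':' so the port is removed
        // without mangling a bracketed IPv6 literal.
        static String hostOf(String brokerId) {
            int idx = brokerId.lastIndexOf(':');
            if (idx < 0) {
                throw new IllegalArgumentException("Invalid brokerId: " + brokerId);
            }
            return brokerId.substring(0, idx);
        }

        public static void main(String[] args) {
            System.out.println(hostOf("broker-1.example.com:8080")); // broker-1.example.com
            System.out.println(hostOf("[2001:db8::1]:8080"));        // [2001:db8::1]
        }
    }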
* @@ -408,6 +509,22 @@ public static void filterAntiAffinityGroupOwnedBrokers( } } + public static CompletableFuture filterAntiAffinityGroupOwnedBrokersAsync( + final PulsarService pulsar, final String assignedBundleName, + final Set candidates, + Set> bundleOwnershipData, + Map brokerToDomainMap + ) { + if (candidates.isEmpty()) { + return CompletableFuture.completedFuture(null); + } + final String namespaceName = getNamespaceNameFromBundleName(assignedBundleName); + return getAntiAffinityNamespaceOwnedBrokers(pulsar, namespaceName, bundleOwnershipData) + .thenAccept(brokerToAntiAffinityNamespaceCount -> + filterAntiAffinityGroupOwnedBrokers(pulsar, candidates, + brokerToDomainMap, brokerToAntiAffinityNamespaceCount)); + } + /** * It computes least number of namespace owned by any of the domain and then it filters out all the domains that own * namespaces more than this count. @@ -648,9 +765,9 @@ public static boolean shouldAntiAffinityNamespaceUnload( } public interface BrokerTopicLoadingPredicate { - boolean isEnablePersistentTopics(String brokerUrl); + boolean isEnablePersistentTopics(String brokerId); - boolean isEnableNonPersistentTopics(String brokerUrl); + boolean isEnableNonPersistentTopics(String brokerId); } /** @@ -711,4 +828,10 @@ public static void refreshBrokerToFailureDomainMap(PulsarService pulsar, LOG.warn("Failed to get domain-list for cluster {}", e.getMessage()); } } + + public static NamespaceBundle getNamespaceBundle(PulsarService pulsar, String bundle) { + final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle); + final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle); + return pulsar.getNamespaceService().getNamespaceBundleFactory().getBundle(namespaceName, bundleRange); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java index c6f406a7c8a5c..3317ed4cb6290 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; import java.util.ArrayList; @@ -107,7 +108,7 @@ public class ModularLoadManagerImpl implements ModularLoadManager { public static final int NUM_SHORT_SAMPLES = 10; // Path to ZNode whose children contain ResourceQuota jsons. - public static final String RESOURCE_QUOTA_ZPATH = "/loadbalance/resource-quota/namespace"; + public static final String RESOURCE_QUOTA_ZPATH = "/loadbalance/resource-quota"; // Path to ZNode containing TimeAverageBrokerData jsons for each broker. 
public static final String TIME_AVERAGE_BROKER_ZPATH = "/loadbalance/broker-time-average"; @@ -218,15 +219,15 @@ public ModularLoadManagerImpl() { this.bundleBrokerAffinityMap = new ConcurrentHashMap<>(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { - final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); + public boolean isEnablePersistentTopics(String brokerId) { + final BrokerData brokerData = loadData.getBrokerData().get(brokerId); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isPersistentTopicsEnabled(); } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { - final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); + public boolean isEnableNonPersistentTopics(String brokerId) { + final BrokerData brokerData = loadData.getBrokerData().get(brokerId); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isNonPersistentTopicsEnabled(); } @@ -560,17 +561,6 @@ private void updateBundleData() { bundleData.put(bundle, currentBundleData); } } - - //Remove not active bundle from loadData - for (String bundle : bundleData.keySet()) { - if (!activeBundles.contains(bundle)){ - bundleData.remove(bundle); - if (pulsar.getLeaderElectionService().isLeader()){ - deleteBundleDataFromMetadataStore(bundle); - } - } - } - // Remove all loaded bundles from the preallocated maps. final Map preallocatedBundleData = brokerData.getPreallocatedBundleData(); Set ownedNsBundles = pulsar.getNamespaceService().getOwnedServiceUnits() @@ -605,6 +595,16 @@ private void updateBundleData() { LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundleData.keySet(), namespaceToBundleRange); } } + + // Remove not active bundle from loadData + for (String bundle : bundleData.keySet()) { + if (!activeBundles.contains(bundle)){ + bundleData.remove(bundle); + if (pulsar.getLeaderElectionService().isLeader()){ + deleteBundleDataFromMetadataStore(bundle); + } + } + } } /** @@ -661,11 +661,24 @@ public synchronized void doLoadShedding() { if (!shouldAntiAffinityNamespaceUnload(namespaceName, bundleRange, broker)) { return; } + NamespaceBundle bundleToUnload = LoadManagerShared.getNamespaceBundle(pulsar, bundle); + Optional destBroker = this.selectBroker(bundleToUnload); + if (!destBroker.isPresent()) { + log.info("[{}] No broker available to unload bundle {} from broker {}", + strategy.getClass().getSimpleName(), bundle, broker); + return; + } + if (destBroker.get().equals(broker)) { + log.warn("[{}] The destination broker {} is the same as the current owner broker for Bundle {}", + strategy.getClass().getSimpleName(), destBroker.get(), bundle); + return; + } - log.info("[{}] Unloading bundle: {} from broker {}", - strategy.getClass().getSimpleName(), bundle, broker); + log.info("[{}] Unloading bundle: {} from broker {} to dest broker {}", + strategy.getClass().getSimpleName(), bundle, broker, destBroker.get()); try { - pulsar.getAdminClient().namespaces().unloadNamespaceBundle(namespaceName, bundleRange); + pulsar.getAdminClient().namespaces() + .unloadNamespaceBundle(namespaceName, bundleRange, destBroker.get()); loadData.getRecentlyUnloadedBundles().put(bundle, System.currentTimeMillis()); } catch (PulsarServerException | PulsarAdminException e) { log.warn("Error when trying to perform load shedding on {} for broker {}", bundle, broker, e); @@ 
-744,7 +757,7 @@ public boolean shouldAntiAffinityNamespaceUnload(String namespace, String bundle public void checkNamespaceBundleSplit() { if (!conf.isLoadBalancerAutoBundleSplitEnabled() || pulsar.getLeaderElectionService() == null - || !pulsar.getLeaderElectionService().isLeader()) { + || !pulsar.getLeaderElectionService().isLeader() || knownBrokers.size() <= 1) { return; } final boolean unloadSplitBundles = pulsar.getConfiguration().isLoadBalancerAutoUnloadSplitBundlesEnabled(); @@ -839,99 +852,119 @@ public Optional selectBrokerForAssignment(final ServiceUnitId serviceUni // If the given bundle is already in preallocated, return the selected broker. return Optional.of(preallocatedBundleToBroker.get(bundle)); } - final BundleData data = loadData.getBundleData().computeIfAbsent(bundle, - key -> getBundleDataOrDefault(bundle)); - brokerCandidateCache.clear(); - LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, - getAvailableBrokers(), - brokerTopicLoadingPredicate); - // filter brokers which owns topic higher than threshold - LoadManagerShared.filterBrokersWithLargeTopicCount(brokerCandidateCache, loadData, - conf.getLoadBalancerBrokerMaxTopics()); - - // distribute namespaces to domain and brokers according to anti-affinity-group - LoadManagerShared.filterAntiAffinityGroupOwnedBrokers(pulsar, serviceUnit.toString(), - brokerCandidateCache, - brokerToNamespaceToBundleRange, brokerToFailureDomainMap); - - // distribute bundles evenly to candidate-brokers if enable - if (conf.isLoadBalancerDistributeBundlesEvenlyEnabled()) { - LoadManagerShared.removeMostServicingBrokersForNamespace(serviceUnit.toString(), - brokerCandidateCache, - brokerToNamespaceToBundleRange); - if (log.isDebugEnabled()) { - log.debug("enable distribute bundles evenly to candidate-brokers, broker candidate count={}", - brokerCandidateCache.size()); - } + Optional broker = selectBroker(serviceUnit); + if (!broker.isPresent()) { + // If no broker is selected, return empty. + return broker; } - log.info("{} brokers being considered for assignment of {}", brokerCandidateCache.size(), bundle); + // Add new bundle to preallocated. + preallocateBundle(bundle, broker.get()); + return broker; + } + } finally { + selectBrokerForAssignment.observe(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); + } + } - // Use the filter pipeline to finalize broker candidates. 
- try { - for (BrokerFilter filter : filterPipeline) { - filter.filter(brokerCandidateCache, data, loadData, conf); - } - } catch (BrokerFilterException x) { - // restore the list of brokers to the full set - LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, - getAvailableBrokers(), - brokerTopicLoadingPredicate); - } + private void preallocateBundle(String bundle, String broker) { + final BundleData data = loadData.getBundleData().computeIfAbsent(bundle, + key -> getBundleDataOrDefault(bundle)); + loadData.getBrokerData().get(broker).getPreallocatedBundleData().put(bundle, data); + preallocatedBundleToBroker.put(bundle, broker); + + final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle); + final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle); + final ConcurrentOpenHashMap> namespaceToBundleRange = + brokerToNamespaceToBundleRange + .computeIfAbsent(broker, + k -> ConcurrentOpenHashMap.>newBuilder() + .build()); + synchronized (namespaceToBundleRange) { + namespaceToBundleRange.computeIfAbsent(namespaceName, + k -> ConcurrentOpenHashSet.newBuilder().build()) + .add(bundleRange); + } + } - if (brokerCandidateCache.isEmpty()) { - // restore the list of brokers to the full set - LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, - getAvailableBrokers(), - brokerTopicLoadingPredicate); - } + @VisibleForTesting + Optional selectBroker(final ServiceUnitId serviceUnit) { + synchronized (brokerCandidateCache) { + final String bundle = serviceUnit.toString(); + final BundleData data = loadData.getBundleData().computeIfAbsent(bundle, + key -> getBundleDataOrDefault(bundle)); + brokerCandidateCache.clear(); + LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, + getAvailableBrokers(), + brokerTopicLoadingPredicate); + + // filter brokers which owns topic higher than threshold + LoadManagerShared.filterBrokersWithLargeTopicCount(brokerCandidateCache, loadData, + conf.getLoadBalancerBrokerMaxTopics()); - // Choose a broker among the potentially smaller filtered list, when possible - Optional broker = placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); + // distribute namespaces to domain and brokers according to anti-affinity-group + LoadManagerShared.filterAntiAffinityGroupOwnedBrokers(pulsar, bundle, + brokerCandidateCache, + brokerToNamespaceToBundleRange, brokerToFailureDomainMap); + + // distribute bundles evenly to candidate-brokers if enable + if (conf.isLoadBalancerDistributeBundlesEvenlyEnabled()) { + LoadManagerShared.removeMostServicingBrokersForNamespace(bundle, + brokerCandidateCache, + brokerToNamespaceToBundleRange); if (log.isDebugEnabled()) { - log.debug("Selected broker {} from candidate brokers {}", broker, brokerCandidateCache); + log.debug("enable distribute bundles evenly to candidate-brokers, broker candidate count={}", + brokerCandidateCache.size()); } + } - if (!broker.isPresent()) { - // No brokers available - return broker; - } + log.info("{} brokers being considered for assignment of {}", brokerCandidateCache.size(), bundle); - final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0; - final double maxUsage = loadData.getBrokerData().get(broker.get()).getLocalData().getMaxResourceUsage(); - if (maxUsage > overloadThreshold) { - // All brokers that were in the filtered list were overloaded, so check if there is a better broker - 
LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, - getAvailableBrokers(), - brokerTopicLoadingPredicate); - Optional brokerTmp = - placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); - if (brokerTmp.isPresent()) { - broker = brokerTmp; - } + // Use the filter pipeline to finalize broker candidates. + try { + for (BrokerFilter filter : filterPipeline) { + filter.filter(brokerCandidateCache, data, loadData, conf); } + } catch (BrokerFilterException x) { + // restore the list of brokers to the full set + LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, + getAvailableBrokers(), + brokerTopicLoadingPredicate); + } - // Add new bundle to preallocated. - loadData.getBrokerData().get(broker.get()).getPreallocatedBundleData().put(bundle, data); - preallocatedBundleToBroker.put(bundle, broker.get()); - - final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle); - final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle); - final ConcurrentOpenHashMap> namespaceToBundleRange = - brokerToNamespaceToBundleRange - .computeIfAbsent(broker.get(), - k -> ConcurrentOpenHashMap.>newBuilder() - .build()); - synchronized (namespaceToBundleRange) { - namespaceToBundleRange.computeIfAbsent(namespaceName, - k -> ConcurrentOpenHashSet.newBuilder().build()) - .add(bundleRange); - } + if (brokerCandidateCache.isEmpty()) { + // restore the list of brokers to the full set + LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, + getAvailableBrokers(), + brokerTopicLoadingPredicate); + } + + // Choose a broker among the potentially smaller filtered list, when possible + Optional broker = placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); + if (log.isDebugEnabled()) { + log.debug("Selected broker {} from candidate brokers {}", broker, brokerCandidateCache); + } + + if (!broker.isPresent()) { + // No brokers available return broker; } - } finally { - selectBrokerForAssignment.observe(System.nanoTime() - startTime, TimeUnit.NANOSECONDS); + + final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0; + final double maxUsage = loadData.getBrokerData().get(broker.get()).getLocalData().getMaxResourceUsage(); + if (maxUsage > overloadThreshold) { + // All brokers that were in the filtered list were overloaded, so check if there is a better broker + LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, + getAvailableBrokers(), + brokerTopicLoadingPredicate); + Optional brokerTmp = + placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); + if (brokerTmp.isPresent()) { + broker = brokerTmp; + } + } + return broker; } } @@ -947,14 +980,14 @@ public void start() throws PulsarServerException { // At this point, the ports will be updated with the real port number that the server was assigned Map protocolData = pulsar.getProtocolDataToAdvertise(); - lastData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + lastData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); lastData.setProtocols(protocolData); // configure broker-topic mode lastData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); 
lastData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); - localData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + localData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); localData.setProtocols(protocolData); localData.setBrokerVersionString(pulsar.getBrokerVersion()); @@ -963,9 +996,9 @@ public void start() throws PulsarServerException { localData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); localData.setLoadManagerClassName(conf.getLoadManagerClassName()); - String lookupServiceAddress = pulsar.getLookupServiceAddress(); - brokerZnodePath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; - final String timeAverageZPath = TIME_AVERAGE_BROKER_ZPATH + "/" + lookupServiceAddress; + String brokerId = pulsar.getBrokerId(); + brokerZnodePath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + brokerId; + final String timeAverageZPath = TIME_AVERAGE_BROKER_ZPATH + "/" + brokerId; updateLocalBrokerData(); brokerDataLock = brokersData.acquireLock(brokerZnodePath, localData).join(); @@ -1199,7 +1232,6 @@ public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java index c61d39cf3159a..c8d81bda1bc13 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.loadbalance.impl; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -32,7 +31,6 @@ import org.apache.pulsar.common.naming.ServiceUnitId; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.policies.data.loadbalancer.LoadManagerReport; -import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; /** * Wrapper class allowing classes of instance ModularLoadManager to be compatible with the interface LoadManager. @@ -75,20 +73,6 @@ public Optional getLeastLoaded(final ServiceUnitId serviceUnit) { return leastLoadedBroker.map(this::buildBrokerResourceUnit); } - private String getBrokerWebServiceUrl(String broker) { - LocalBrokerData localData = (loadManager).getBrokerLocalData(broker); - if (localData != null) { - return localData.getWebServiceUrl() != null ? 
localData.getWebServiceUrl() - : localData.getWebServiceUrlTls(); - } - return String.format("http://%s", broker); - } - - private String getBrokerZnodeName(String broker, String webServiceUrl) { - String scheme = webServiceUrl.substring(0, webServiceUrl.indexOf("://")); - return String.format("%s://%s", scheme, broker); - } - @Override public List getLoadBalancingMetrics() { return loadManager.getLoadBalancingMetrics(); @@ -149,10 +133,7 @@ public CompletableFuture> getAvailableBrokersAsync() { } private SimpleResourceUnit buildBrokerResourceUnit (String broker) { - String webServiceUrl = getBrokerWebServiceUrl(broker); - String brokerZnodeName = getBrokerZnodeName(broker, webServiceUrl); - return new SimpleResourceUnit(webServiceUrl, - new PulsarResourceDescription(), Map.of(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME, brokerZnodeName)); + return new SimpleResourceUnit(broker, new PulsarResourceDescription()); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java index 5e99456971147..8bb27e52298be 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java @@ -211,15 +211,15 @@ public SimpleLoadManagerImpl() { .build(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { - ResourceUnit ru = new SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); + public boolean isEnablePersistentTopics(String brokerId) { + ResourceUnit ru = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isPersistentTopicsEnabled(); } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { - ResourceUnit ru = new SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); + public boolean isEnableNonPersistentTopics(String brokerId) { + ResourceUnit ru = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isNonPersistentTopicsEnabled(); } @@ -234,7 +234,7 @@ public void initialize(final PulsarService pulsar) { brokerHostUsage = new GenericBrokerHostUsageImpl(pulsar); } this.policies = new SimpleResourceAllocationPolicies(pulsar); - lastLoadReport = new LoadReport(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + lastLoadReport = new LoadReport(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls()); lastLoadReport.setProtocols(pulsar.getProtocolDataToAdvertise()); lastLoadReport.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); @@ -266,8 +266,8 @@ public SimpleLoadManagerImpl(PulsarService pulsar) { @Override public void start() throws PulsarServerException { // Register the brokers in metadata store - String lookupServiceAddress = pulsar.getLookupServiceAddress(); - String brokerLockPath = LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerId = pulsar.getBrokerId(); + String brokerLockPath = LOADBALANCE_BROKERS_ROOT + "/" + brokerId; try { LoadReport loadReport = null; @@ -653,7 +653,6 @@ public void 
writeResourceQuotasToZooKeeper() throws Exception { */ private synchronized void doLoadRanking() { ResourceUnitRanking.setCpuUsageByMsgRate(this.realtimeCpuLoadFactor); - String hostname = pulsar.getAdvertisedAddress(); String strategy = this.getLoadBalancerPlacementStrategy(); log.info("doLoadRanking - load balancing strategy: {}", strategy); if (!currentLoadReports.isEmpty()) { @@ -702,8 +701,8 @@ private synchronized void doLoadRanking() { } // update metrics - if (resourceUnit.getResourceId().contains(hostname)) { - updateLoadBalancingMetrics(hostname, finalRank, ranking); + if (resourceUnit.getResourceId().equals(pulsar.getBrokerId())) { + updateLoadBalancingMetrics(pulsar.getAdvertisedAddress(), finalRank, ranking); } } updateBrokerToNamespaceToBundle(); @@ -711,7 +710,7 @@ private synchronized void doLoadRanking() { this.resourceUnitRankings = newResourceUnitRankings; } else { log.info("Leader broker[{}] No ResourceUnits to rank this run, Using Old Ranking", - pulsar.getSafeWebServiceAddress()); + pulsar.getBrokerId()); } } @@ -855,7 +854,7 @@ private synchronized ResourceUnit findBrokerForPlacement(Multimap ConcurrentOpenHashMap.>newBuilder() .build()) @@ -876,7 +875,7 @@ private Multimap getFinalCandidates(ServiceUnitId serviceUni availableBrokersCache.clear(); for (final Set resourceUnits : availableBrokers.values()) { for (final ResourceUnit resourceUnit : resourceUnits) { - availableBrokersCache.add(resourceUnit.getResourceId().replace("http://", "")); + availableBrokersCache.add(resourceUnit.getResourceId()); } } brokerCandidateCache.clear(); @@ -899,7 +898,7 @@ private Multimap getFinalCandidates(ServiceUnitId serviceUni final Long rank = entry.getKey(); final Set resourceUnits = entry.getValue(); for (final ResourceUnit resourceUnit : resourceUnits) { - if (brokerCandidateCache.contains(resourceUnit.getResourceId().replace("http://", ""))) { + if (brokerCandidateCache.contains(resourceUnit.getResourceId())) { result.put(rank, resourceUnit); } } @@ -928,8 +927,7 @@ private Map> getAvailableBrokers(ServiceUnitId serviceUn availableBrokers = new HashMap<>(); for (String broker : activeBrokers) { - ResourceUnit resourceUnit = new SimpleResourceUnit(String.format("http://%s", broker), - new PulsarResourceDescription()); + ResourceUnit resourceUnit = new SimpleResourceUnit(broker, new PulsarResourceDescription()); availableBrokers.computeIfAbsent(0L, key -> new TreeSet<>()).add(resourceUnit); } log.info("Choosing at random from broker list: [{}]", availableBrokers.values()); @@ -956,7 +954,7 @@ private synchronized ResourceUnit getLeastLoadedBroker(ServiceUnitId serviceUnit Iterator> candidateIterator = finalCandidates.entries().iterator(); while (candidateIterator.hasNext()) { Map.Entry candidate = candidateIterator.next(); - String candidateBrokerName = candidate.getValue().getResourceId().replace("http://", ""); + String candidateBrokerName = candidate.getValue().getResourceId(); if (!activeBrokers.contains(candidateBrokerName)) { candidateIterator.remove(); // Current candidate points to an inactive broker, so remove it } @@ -1005,8 +1003,7 @@ private void updateRanking() { try { String key = String.format("%s/%s", LOADBALANCE_BROKERS_ROOT, broker); LoadReport lr = loadReports.readLock(key).join().get(); - ResourceUnit ru = new SimpleResourceUnit(String.format("http://%s", lr.getName()), - fromLoadReport(lr)); + ResourceUnit ru = new SimpleResourceUnit(lr.getName(), fromLoadReport(lr)); this.currentLoadReports.put(ru, lr); } catch (Exception e) { log.warn("Error reading load 
report from Cache for broker - [{}], [{}]", broker, e); @@ -1072,13 +1069,13 @@ public LoadReport generateLoadReport() throws Exception { private LoadReport generateLoadReportForcefully() throws Exception { synchronized (bundleGainsCache) { try { - LoadReport loadReport = new LoadReport(pulsar.getSafeWebServiceAddress(), + LoadReport loadReport = new LoadReport(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls()); loadReport.setProtocols(pulsar.getProtocolDataToAdvertise()); loadReport.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); loadReport.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); - loadReport.setName(pulsar.getLookupServiceAddress()); + loadReport.setName(pulsar.getBrokerId()); loadReport.setBrokerVersionString(pulsar.getBrokerVersion()); SystemResourceUsage systemResourceUsage = this.getSystemResourceUsage(); @@ -1121,8 +1118,8 @@ private LoadReport generateLoadReportForcefully() throws Exception { loadReport.setAllocatedMsgRateIn(allocatedQuota.getMsgRateIn()); loadReport.setAllocatedMsgRateOut(allocatedQuota.getMsgRateOut()); - final ResourceUnit resourceUnit = new SimpleResourceUnit( - String.format("http://%s", loadReport.getName()), fromLoadReport(loadReport)); + final ResourceUnit resourceUnit = + new SimpleResourceUnit(loadReport.getName(), fromLoadReport(loadReport)); Set preAllocatedBundles; if (resourceUnitRankings.containsKey(resourceUnit)) { preAllocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles(); @@ -1277,7 +1274,7 @@ private synchronized void updateBrokerToNamespaceToBundle() { final Set preallocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles(); final ConcurrentOpenHashMap> namespaceToBundleRange = brokerToNamespaceToBundleRange - .computeIfAbsent(broker.replace("http://", ""), + .computeIfAbsent(broker, k -> ConcurrentOpenHashMap.>newBuilder() .build()); @@ -1455,7 +1452,6 @@ public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceAllocationPolicies.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceAllocationPolicies.java index 4a1577a4e28b6..25fbc152b1163 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceAllocationPolicies.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceAllocationPolicies.java @@ -20,6 +20,7 @@ import java.util.Map; import java.util.Optional; +import java.util.concurrent.CompletableFuture; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.loadbalance.LoadReport; import org.apache.pulsar.broker.loadbalance.ResourceUnit; @@ -53,6 +54,20 @@ private Optional getIsolationPolicies(String cluster } } + private CompletableFuture> getIsolationPoliciesAsync(String clusterName) { + return this.pulsar.getPulsarResources().getNamespaceResources() + .getIsolationPolicies().getIsolationDataPoliciesAsync(clusterName); + } + + public CompletableFuture areIsolationPoliciesPresentAsync(NamespaceName namespace) { + return getIsolationPoliciesAsync(pulsar.getConfiguration().getClusterName()) + 
.thenApply(policies -> { + return policies.filter(isolationPolicies -> + isolationPolicies.getPolicyByNamespace(namespace) != null) + .isPresent(); + }); + } + public boolean areIsolationPoliciesPresent(NamespaceName namespace) { try { Optional policies = diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java index b92af5b7c69f3..d8dcfa007cfc5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java @@ -25,7 +25,7 @@ import org.apache.commons.lang3.mutable.MutableDouble; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.commons.lang3.mutable.MutableObject; -import org.apache.commons.lang3.tuple.Triple; +import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.LoadData; import org.apache.pulsar.broker.loadbalance.LoadSheddingStrategy; @@ -36,7 +36,7 @@ /** * This strategy tends to distribute load uniformly across all brokers. This strategy checks load difference between - * broker with highest load and broker with lowest load. If the difference is higher than configured thresholds + * broker with the highest load and broker with the lowest load. If the difference is higher than configured thresholds * {@link ServiceConfiguration#getLoadBalancerMsgRateDifferenceShedderThreshold()} or * {@link ServiceConfiguration#getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold()} then it finds out * bundles which can be unloaded to distribute traffic evenly across all brokers. 
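(Editorial note, not part of the patch.) For readers of the hunks that follow, the trigger condition described in this Javadoc can be summarized as a pure function. The sketch below mirrors the EPS guard and the two threshold comparisons from the hunk that follows; it is illustrative only, and the parameter names are assumed rather than taken from ServiceConfiguration:

    // Illustrative summary of the UniformLoadShedder trigger condition.
    final class UniformShedderTriggerSketch {
        private static final double EPS = 1e-6;

        static boolean shouldShed(double maxMsgRate, double minMsgRate,
                                  double maxThroughput, double minThroughput,
                                  double msgRateDiffThresholdPercent,
                                  double throughputMultiplierThreshold) {
            // Avoid division by (near) zero, as the implementation does with EPS.
            if (Math.abs(minMsgRate) <= EPS) {
                minMsgRate = 1.0;
            }
            if (Math.abs(minThroughput) <= EPS) {
                minThroughput = 1.0;
            }
            double msgRateDifferencePercentage = ((maxMsgRate - minMsgRate) * 100) / minMsgRate;
            double msgThroughputDifferenceRate = maxThroughput / minThroughput;
            boolean rateExceeded = msgRateDiffThresholdPercent > 0
                    && msgRateDifferencePercentage > msgRateDiffThresholdPercent;
            boolean throughputExceeded = throughputMultiplierThreshold > 0
                    && msgThroughputDifferenceRate > throughputMultiplierThreshold;
            return rateExceeded || throughputExceeded;
        }
    }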
@@ -63,25 +63,37 @@ public Multimap findBundlesForUnloading(final LoadData loadData, Map loadBundleData = loadData.getBundleDataForLoadShedding(); Map recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles(); - MutableObject overloadedBroker = new MutableObject<>(); - MutableObject underloadedBroker = new MutableObject<>(); + MutableObject msgRateOverloadedBroker = new MutableObject<>(); + MutableObject msgThroughputOverloadedBroker = new MutableObject<>(); + MutableObject msgRateUnderloadedBroker = new MutableObject<>(); + MutableObject msgThroughputUnderloadedBroker = new MutableObject<>(); MutableDouble maxMsgRate = new MutableDouble(-1); - MutableDouble maxThroughputRate = new MutableDouble(-1); + MutableDouble maxThroughput = new MutableDouble(-1); MutableDouble minMsgRate = new MutableDouble(Integer.MAX_VALUE); - MutableDouble minThroughputRate = new MutableDouble(Integer.MAX_VALUE); + MutableDouble minThroughput = new MutableDouble(Integer.MAX_VALUE); + brokersData.forEach((broker, data) -> { double msgRate = data.getLocalData().getMsgRateIn() + data.getLocalData().getMsgRateOut(); double throughputRate = data.getLocalData().getMsgThroughputIn() + data.getLocalData().getMsgThroughputOut(); - if (msgRate > maxMsgRate.getValue() || throughputRate > maxThroughputRate.getValue()) { - overloadedBroker.setValue(broker); + if (msgRate > maxMsgRate.getValue()) { + msgRateOverloadedBroker.setValue(broker); maxMsgRate.setValue(msgRate); - maxThroughputRate.setValue(throughputRate); } - if (msgRate < minMsgRate.getValue() || throughputRate < minThroughputRate.getValue()) { - underloadedBroker.setValue(broker); + + if (throughputRate > maxThroughput.getValue()) { + msgThroughputOverloadedBroker.setValue(broker); + maxThroughput.setValue(throughputRate); + } + + if (msgRate < minMsgRate.getValue()) { + msgRateUnderloadedBroker.setValue(broker); minMsgRate.setValue(msgRate); - minThroughputRate.setValue(throughputRate); + } + + if (throughputRate < minThroughput.getValue()) { + msgThroughputUnderloadedBroker.setValue(broker); + minThroughput.setValue(throughputRate); } }); @@ -91,12 +103,12 @@ public Multimap findBundlesForUnloading(final LoadData loadData, if (minMsgRate.getValue() <= EPS && minMsgRate.getValue() >= -EPS) { minMsgRate.setValue(1.0); } - if (minThroughputRate.getValue() <= EPS && minThroughputRate.getValue() >= -EPS) { - minThroughputRate.setValue(1.0); + if (minThroughput.getValue() <= EPS && minThroughput.getValue() >= -EPS) { + minThroughput.setValue(1.0); } double msgRateDifferencePercentage = ((maxMsgRate.getValue() - minMsgRate.getValue()) * 100) / (minMsgRate.getValue()); - double msgThroughputDifferenceRate = maxThroughputRate.getValue() / minThroughputRate.getValue(); + double msgThroughputDifferenceRate = maxThroughput.getValue() / minThroughput.getValue(); // if the threshold matches then find out how much load needs to be unloaded by considering number of msgRate // and throughput. @@ -105,66 +117,91 @@ public Multimap findBundlesForUnloading(final LoadData loadData, boolean isMsgThroughputThresholdExceeded = conf .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold() > 0 && msgThroughputDifferenceRate > conf - .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(); + .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(); if (isMsgRateThresholdExceeded || isMsgThroughputThresholdExceeded) { - if (log.isDebugEnabled()) { - log.debug( - "Found bundles for uniform load balancing. 
" - + "overloaded broker {} with (msgRate,throughput)= ({},{}) " - + "and underloaded broker {} with (msgRate,throughput)= ({},{})", - overloadedBroker.getValue(), maxMsgRate.getValue(), maxThroughputRate.getValue(), - underloadedBroker.getValue(), minMsgRate.getValue(), minThroughputRate.getValue()); - } MutableInt msgRateRequiredFromUnloadedBundles = new MutableInt( (int) ((maxMsgRate.getValue() - minMsgRate.getValue()) * conf.getMaxUnloadPercentage())); MutableInt msgThroughputRequiredFromUnloadedBundles = new MutableInt( - (int) ((maxThroughputRate.getValue() - minThroughputRate.getValue()) + (int) ((maxThroughput.getValue() - minThroughput.getValue()) * conf.getMaxUnloadPercentage())); - LocalBrokerData overloadedBrokerData = brokersData.get(overloadedBroker.getValue()).getLocalData(); - - if (overloadedBrokerData.getBundles().size() > 1 - && (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage() - || msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput())) { - // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with - // under-loaded broker - loadBundleData.entrySet().stream() - .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) - .map((e) -> { - String bundle = e.getKey(); - BundleData bundleData = e.getValue(); - TimeAverageMessageData shortTermData = bundleData.getShortTermData(); - double throughput = isMsgRateThresholdExceeded - ? shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut() - : shortTermData.getMsgThroughputIn() + shortTermData.getMsgThroughputOut(); - return Triple.of(bundle, bundleData, throughput); - }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) - .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { - if (conf.getMaxUnloadBundleNumPerShedding() != -1 - && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { - return; - } - String bundle = e.getLeft(); - BundleData bundleData = e.getMiddle(); - TimeAverageMessageData shortTermData = bundleData.getShortTermData(); - double throughput = shortTermData.getMsgThroughputIn() - + shortTermData.getMsgThroughputOut(); - double bundleMsgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut(); - if (isMsgRateThresholdExceeded) { + if (isMsgRateThresholdExceeded) { + if (log.isDebugEnabled()) { + log.debug("Found bundles for uniform load balancing. 
" + + "msgRate overloaded broker: {} with msgRate: {}, " + + "msgRate underloaded broker: {} with msgRate: {}", + msgRateOverloadedBroker.getValue(), maxMsgRate.getValue(), + msgRateUnderloadedBroker.getValue(), minMsgRate.getValue()); + } + LocalBrokerData overloadedBrokerData = + brokersData.get(msgRateOverloadedBroker.getValue()).getLocalData(); + if (overloadedBrokerData.getBundles().size() > 1 + && (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage())) { + // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with + // under-loaded broker + loadBundleData.entrySet().stream() + .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) + .map((e) -> { + String bundle = e.getKey(); + TimeAverageMessageData shortTermData = e.getValue().getShortTermData(); + double msgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut(); + return Pair.of(bundle, msgRate); + }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) + .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { + if (conf.getMaxUnloadBundleNumPerShedding() != -1 + && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { + return; + } + String bundle = e.getLeft(); + double bundleMsgRate = e.getRight(); if (bundleMsgRate <= (msgRateRequiredFromUnloadedBundles.getValue() + 1000/* delta */)) { log.info("Found bundle to unload with msgRate {}", bundleMsgRate); msgRateRequiredFromUnloadedBundles.add(-bundleMsgRate); - selectedBundlesCache.put(overloadedBroker.getValue(), bundle); + selectedBundlesCache.put(msgRateOverloadedBroker.getValue(), bundle); } - } else { - if (throughput <= (msgThroughputRequiredFromUnloadedBundles.getValue())) { - log.info("Found bundle to unload with throughput {}", throughput); - msgThroughputRequiredFromUnloadedBundles.add(-throughput); - selectedBundlesCache.put(overloadedBroker.getValue(), bundle); + }); + } + } else { + if (log.isDebugEnabled()) { + log.debug("Found bundles for uniform load balancing. 
" + + "msgThroughput overloaded broker: {} with msgThroughput {}, " + + "msgThroughput underloaded broker: {} with msgThroughput: {}", + msgThroughputOverloadedBroker.getValue(), maxThroughput.getValue(), + msgThroughputUnderloadedBroker.getValue(), minThroughput.getValue()); + } + LocalBrokerData overloadedBrokerData = + brokersData.get(msgThroughputOverloadedBroker.getValue()).getLocalData(); + if (overloadedBrokerData.getBundles().size() > 1 + && + msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput()) { + // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with + // under-loaded broker + loadBundleData.entrySet().stream() + .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) + .map((e) -> { + String bundle = e.getKey(); + TimeAverageMessageData shortTermData = e.getValue().getShortTermData(); + double msgThroughput = shortTermData.getMsgThroughputIn() + + shortTermData.getMsgThroughputOut(); + return Pair.of(bundle, msgThroughput); + }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) + .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { + if (conf.getMaxUnloadBundleNumPerShedding() != -1 + && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { + return; } - } - }); + String bundle = e.getLeft(); + double msgThroughput = e.getRight(); + if (msgThroughput <= (msgThroughputRequiredFromUnloadedBundles.getValue() + + 1000/* delta */)) { + log.info("Found bundle to unload with msgThroughput {}", msgThroughput); + msgThroughputRequiredFromUnloadedBundles.add(-msgThroughput); + selectedBundlesCache.put(msgThroughputOverloadedBroker.getValue(), bundle); + } + }); + } + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java index 3b64d2a9f8393..7b2c777414884 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java @@ -26,11 +26,11 @@ import java.net.URISyntaxException; import java.util.Optional; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; import javax.ws.rs.Encoded; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; @@ -41,12 +41,14 @@ import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.lookup.data.LookupData; import org.apache.pulsar.common.naming.NamespaceBundle; +import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.NamespaceOperation; import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.metadata.api.MetadataStoreException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,7 +57,7 @@ public class TopicLookupBase extends PulsarWebResource { private static final String LOOKUP_PATH_V1 = "/lookup/v2/destination/"; private 
static final String LOOKUP_PATH_V2 = "/lookup/v2/topic/"; - protected CompletableFuture internalLookupTopicAsync(TopicName topicName, boolean authoritative, + protected CompletableFuture internalLookupTopicAsync(final TopicName topicName, boolean authoritative, String listenerName) { if (!pulsar().getBrokerService().getLookupRequestSemaphore().tryAcquire()) { log.warn("No broker was found available for topic {}", topicName); @@ -78,7 +80,8 @@ protected CompletableFuture internalLookupTopicAsync(TopicName topic }) .thenCompose(exist -> { if (!exist) { - throw new RestException(Response.Status.NOT_FOUND, "Topic not found."); + throw new RestException(Response.Status.NOT_FOUND, + String.format("Topic not found %s", topicName.toString())); } CompletableFuture> lookupFuture = pulsar().getNamespaceService() .getBrokerServiceUrlAsync(topicName, @@ -130,10 +133,10 @@ protected CompletableFuture internalLookupTopicAsync(TopicName topic pulsar().getBrokerService().getLookupRequestSemaphore().release(); return result.getLookupData(); } - }).exceptionally(ex->{ - pulsar().getBrokerService().getLookupRequestSemaphore().release(); - throw FutureUtil.wrapToCompletionException(ex); }); + }).exceptionally(ex -> { + pulsar().getBrokerService().getLookupRequestSemaphore().release(); + throw FutureUtil.wrapToCompletionException(ex); }); } @@ -221,26 +224,31 @@ public static CompletableFuture lookupTopicAsync(PulsarService pulsarSe // (2) authorize client checkAuthorizationAsync(pulsarService, topicName, clientAppId, authenticationData).thenRun(() -> { // (3) validate global namespace + // It is necessary for system topic operations because system topics are used to store metadata + // and other vital information. Even after namespace starting deletion, + // we need to access the metadata of system topics to create readers and clean up topic data. + // If we don't do this, it can prevent namespace deletion due to inaccessible readers. 
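(Editorial note, not part of the patch.) The comment block above motivates the extra boolean passed into checkLocalOrGetPeerReplicationCluster below. A rough, hypothetical sketch of the intended gate; this is not the actual PulsarWebResource implementation, and the method name and flag are assumptions:

    // Hypothetical gate: system topics stay readable while their namespace is being deleted,
    // so deletion can still create readers and clean up topic data.
    import java.util.concurrent.CompletableFuture;

    final class DeletedNamespaceLookupGateSketch {
        static CompletableFuture<Void> validateLookup(boolean namespaceBeingDeleted, boolean isSystemTopic) {
            if (namespaceBeingDeleted && !isSystemTopic) {
                // Regular topics in a namespace under deletion are rejected.
                return CompletableFuture.failedFuture(
                        new IllegalStateException("Namespace is being deleted"));
            }
            return CompletableFuture.completedFuture(null);
        }
    }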
checkLocalOrGetPeerReplicationCluster(pulsarService, - topicName.getNamespaceObject()).thenAccept(peerClusterData -> { - if (peerClusterData == null) { - // (4) all validation passed: initiate lookup - validationFuture.complete(null); - return; - } - // if peer-cluster-data is present it means namespace is owned by that peer-cluster and - // request should be redirect to the peer-cluster - if (StringUtils.isBlank(peerClusterData.getBrokerServiceUrl()) - && StringUtils.isBlank(peerClusterData.getBrokerServiceUrlTls())) { - validationFuture.complete(newLookupErrorResponse(ServerError.MetadataError, - "Redirected cluster's brokerService url is not configured", - requestId)); - return; - } - validationFuture.complete(newLookupResponse(peerClusterData.getBrokerServiceUrl(), - peerClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, - requestId, - false)); + topicName.getNamespaceObject(), SystemTopicNames.isSystemTopic(topicName)) + .thenAccept(peerClusterData -> { + if (peerClusterData == null) { + // (4) all validation passed: initiate lookup + validationFuture.complete(null); + return; + } + // if peer-cluster-data is present it means namespace is owned by that peer-cluster + // and request should be redirect to the peer-cluster + if (StringUtils.isBlank(peerClusterData.getBrokerServiceUrl()) + && StringUtils.isBlank(peerClusterData.getBrokerServiceUrlTls())) { + validationFuture.complete(newLookupErrorResponse(ServerError.MetadataError, + "Redirected cluster's brokerService url is not configured", + requestId)); + return; + } + validationFuture.complete(newLookupResponse(peerClusterData.getBrokerServiceUrl(), + peerClusterData.getBrokerServiceUrlTls(), true, + LookupType.Redirect, requestId, + false)); }).exceptionally(ex -> { Throwable throwable = FutureUtil.unwrapCompletionException(ex); if (throwable instanceof RestException restException){ @@ -312,35 +320,40 @@ public static CompletableFuture lookupTopicAsync(PulsarService pulsarSe requestId, shouldRedirectThroughServiceUrl(conf, lookupData))); } }).exceptionally(ex -> { - if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) { - log.info("Failed to lookup {} for topic {} with error {}", clientAppId, - topicName.toString(), ex.getCause().getMessage()); - } else { - log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, - topicName.toString(), ex.getMessage(), ex); - } - lookupfuture.complete( - newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId)); - return null; - }); + handleLookupError(lookupfuture, topicName.toString(), clientAppId, requestId, ex); + return null; + }); } - }).exceptionally(ex -> { - if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) { - log.info("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(), - ex.getCause().getMessage()); - } else { - log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(), - ex.getMessage(), ex); - } - - lookupfuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId)); + handleLookupError(lookupfuture, topicName.toString(), clientAppId, requestId, ex); return null; }); return lookupfuture; } + private static void handleLookupError(CompletableFuture lookupFuture, String topicName, String clientAppId, + long requestId, Throwable ex){ + Throwable unwrapEx = FutureUtil.unwrapCompletionException(ex); + final String errorMsg = unwrapEx.getMessage(); + if (unwrapEx 
instanceof PulsarServerException) { + unwrapEx = FutureUtil.unwrapCompletionException(unwrapEx.getCause()); + } + if (unwrapEx instanceof IllegalStateException) { + // Current broker still hold the bundle's lock, but the bundle is being unloading. + log.info("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.MetadataError, errorMsg, requestId)); + } else if (unwrapEx instanceof MetadataStoreException) { + // Load bundle ownership or acquire lock failed. + // Differ with "IllegalStateException", print warning log. + log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.MetadataError, errorMsg, requestId)); + } else { + log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady, errorMsg, requestId)); + } + } + protected TopicName getTopicName(String topicDomain, String tenant, String cluster, String namespace, @Encoded String encodedTopic) { String decodedName = Codec.decode(encodedTopic); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java index a0e2bf7534c02..e613e0d4c4278 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java @@ -38,7 +38,9 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Matcher; @@ -130,9 +132,9 @@ public class NamespaceService implements AutoCloseable { public static final Pattern HEARTBEAT_NAMESPACE_PATTERN = Pattern.compile("pulsar/[^/]+/([^:]+:\\d+)"); public static final Pattern HEARTBEAT_NAMESPACE_PATTERN_V2 = Pattern.compile("pulsar/([^:]+:\\d+)"); public static final Pattern SLA_NAMESPACE_PATTERN = Pattern.compile(SLA_NAMESPACE_PROPERTY + "/[^/]+/([^:]+:\\d+)"); - public static final String HEARTBEAT_NAMESPACE_FMT = "pulsar/%s/%s:%s"; - public static final String HEARTBEAT_NAMESPACE_FMT_V2 = "pulsar/%s:%s"; - public static final String SLA_NAMESPACE_FMT = SLA_NAMESPACE_PROPERTY + "/%s/%s:%s"; + public static final String HEARTBEAT_NAMESPACE_FMT = "pulsar/%s/%s"; + public static final String HEARTBEAT_NAMESPACE_FMT_V2 = "pulsar/%s"; + public static final String SLA_NAMESPACE_FMT = SLA_NAMESPACE_PROPERTY + "/%s/%s"; private final ConcurrentOpenHashMap namespaceClients; @@ -157,7 +159,7 @@ public class NamespaceService implements AutoCloseable { */ public NamespaceService(PulsarService pulsar) { this.pulsar = pulsar; - host = pulsar.getAdvertisedAddress(); + this.host = pulsar.getAdvertisedAddress(); this.config = pulsar.getConfiguration(); this.loadManager = pulsar.getLoadManager(); this.bundleFactory = new NamespaceBundleFactory(pulsar, Hashing.crc32()); @@ -181,10 +183,10 @@ public CompletableFuture> getBrokerServiceUrlAsync(TopicN CompletableFuture> future = getBundleAsync(topic) .thenCompose(bundle -> { // Do redirection if the cluster is in rollback or deploying. 
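(Editorial note, not part of the patch.) The handleLookupError helper added in the TopicLookupBase hunk above classifies lookup failures before completing the future. As a compact reference, the mapping can be sketched as follows; it mirrors the diff, the initial PulsarServerException unwrapping is omitted for brevity, and ServerError and MetadataStoreException are the types imported there:

    // Illustrative mapping of lookup failures to ServerError codes (mirrors handleLookupError above).
    import org.apache.pulsar.common.api.proto.ServerError;
    import org.apache.pulsar.metadata.api.MetadataStoreException;

    final class LookupErrorMappingSketch {
        static ServerError classify(Throwable unwrapped) {
            if (unwrapped instanceof IllegalStateException) {
                // The bundle is still owned locally but is being unloaded; logged at info level.
                return ServerError.MetadataError;
            }
            if (unwrapped instanceof MetadataStoreException) {
                // Loading bundle ownership or acquiring the lock failed; logged at warn level.
                return ServerError.MetadataError;
            }
            // Everything else is reported as the broker not being ready to serve the lookup.
            return ServerError.ServiceNotReady;
        }
    }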
- return redirectManager.findRedirectLookupResultAsync().thenCompose(optResult -> { + return findRedirectLookupResultAsync(bundle).thenCompose(optResult -> { if (optResult.isPresent()) { LOG.info("[{}] Redirect lookup request to {} for topic {}", - pulsar.getSafeWebServiceAddress(), optResult.get(), topic); + pulsar.getBrokerId(), optResult.get(), topic); return CompletableFuture.completedFuture(optResult); } if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { @@ -213,6 +215,13 @@ public CompletableFuture> getBrokerServiceUrlAsync(TopicN return future; } + private CompletableFuture> findRedirectLookupResultAsync(ServiceUnitId bundle) { + if (isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + return CompletableFuture.completedFuture(Optional.empty()); + } + return redirectManager.findRedirectLookupResultAsync(); + } + public CompletableFuture getBundleAsync(TopicName topic) { return bundleFactory.getBundlesAsync(topic.getNamespaceObject()) .thenApply(bundles -> bundles.findBundle(topic)); @@ -281,11 +290,10 @@ public Optional getWebServiceUrl(ServiceUnitId suName, LookupOptions option private CompletableFuture> internalGetWebServiceUrl(Optional topic, NamespaceBundle bundle, LookupOptions options) { - - return redirectManager.findRedirectLookupResultAsync().thenCompose(optResult -> { + return findRedirectLookupResultAsync(bundle).thenCompose(optResult -> { if (optResult.isPresent()) { LOG.info("[{}] Redirect lookup request to {} for topic {}", - pulsar.getSafeWebServiceAddress(), optResult.get(), topic); + pulsar.getBrokerId(), optResult.get(), topic); try { LookupData lookupData = optResult.get().getLookupData(); final String redirectUrl = options.isRequestHttps() @@ -325,15 +333,17 @@ private CompletableFuture> internalGetWebServiceUrl(Optional> availableBroker = getLeastLoadedFromLoadManager(bundle); + Optional availableBroker = getLeastLoadedFromLoadManager(bundle); if (!availableBroker.isPresent()) { LOG.warn("Load manager didn't return any available broker. " + "Returning empty result to lookup. NamespaceBundle[{}]", @@ -558,12 +574,11 @@ private void searchForCandidateBroker(NamespaceBundle bundle, lookupFuture.complete(Optional.empty()); return; } - candidateBroker = availableBroker.get().getLeft(); - candidateBrokerAdvertisedAddr = availableBroker.get().getRight(); + candidateBroker = availableBroker.get(); authoritativeRedirect = true; } else { // forward to leader broker to make assignment - candidateBroker = currentLeader.get().getServiceUrl(); + candidateBroker = currentLeader.get().getBrokerId(); } } } @@ -576,7 +591,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, try { Objects.requireNonNull(candidateBroker); - if (candidateBroker.equals(pulsar.getSafeWebServiceAddress())) { + if (candidateBroker.equals(pulsar.getBrokerId())) { // Load manager decided that the local broker should try to become the owner ownershipCache.tryAcquiringOwnership(bundle).thenAccept(ownerInfo -> { if (ownerInfo.isDisabled()) { @@ -630,8 +645,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } // Now setting the redirect url - createLookupResult(candidateBrokerAdvertisedAddr == null ? 
candidateBroker - : candidateBrokerAdvertisedAddr, authoritativeRedirect, options.getAdvertisedListenerName()) + createLookupResult(candidateBroker, authoritativeRedirect, options.getAdvertisedListenerName()) .thenAccept(lookupResult -> lookupFuture.complete(Optional.of(lookupResult))) .exceptionally(ex -> { lookupFuture.completeExceptionally(ex); @@ -651,7 +665,7 @@ public CompletableFuture createLookupResult(String candidateBroker CompletableFuture lookupFuture = new CompletableFuture<>(); try { checkArgument(StringUtils.isNotBlank(candidateBroker), "Lookup broker can't be null %s", candidateBroker); - String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + parseHostAndPort(candidateBroker); + String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + candidateBroker; localBrokerDataCache.get(path).thenAccept(reportData -> { if (reportData.isPresent()) { @@ -687,31 +701,20 @@ public CompletableFuture createLookupResult(String candidateBroker return lookupFuture; } - private boolean isBrokerActive(String candidateBroker) { - String candidateBrokerHostAndPort = parseHostAndPort(candidateBroker); + public boolean isBrokerActive(String candidateBroker) { Set availableBrokers = getAvailableBrokers(); - if (availableBrokers.contains(candidateBrokerHostAndPort)) { + if (availableBrokers.contains(candidateBroker)) { if (LOG.isDebugEnabled()) { - LOG.debug("Broker {} ({}) is available for.", candidateBroker, candidateBrokerHostAndPort); + LOG.debug("Broker {} is available for.", candidateBroker); } return true; } else { - LOG.warn("Broker {} ({}) couldn't be found in available brokers {}", - candidateBroker, candidateBrokerHostAndPort, - availableBrokers.stream().collect(Collectors.joining(","))); + LOG.warn("Broker {} couldn't be found in available brokers {}", + candidateBroker, String.join(",", availableBrokers)); return false; } } - private static String parseHostAndPort(String candidateBroker) { - int uriSeparatorPos = candidateBroker.indexOf("://"); - if (uriSeparatorPos == -1) { - throw new IllegalArgumentException("'" + candidateBroker + "' isn't an URI."); - } - String candidateBrokerHostAndPort = candidateBroker.substring(uriSeparatorPos + 3); - return candidateBrokerHostAndPort; - } - private Set getAvailableBrokers() { try { return loadManager.get().getAvailableBrokers(); @@ -726,7 +729,7 @@ private Set getAvailableBrokers() { * @return * @throws Exception */ - private Optional> getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { + private Optional getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { Optional leastLoadedBroker = loadManager.get().getLeastLoaded(serviceUnit); if (!leastLoadedBroker.isPresent()) { LOG.warn("No broker is available for {}", serviceUnit); @@ -734,15 +737,13 @@ private Optional> getLeastLoadedFromLoadManager(ServiceUnit } String lookupAddress = leastLoadedBroker.get().getResourceId(); - String advertisedAddr = (String) leastLoadedBroker.get() - .getProperty(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME); if (LOG.isDebugEnabled()) { LOG.debug("{} : redirecting to the least loaded broker, lookup address={}", - pulsar.getSafeWebServiceAddress(), + pulsar.getBrokerId(), lookupAddress); } - return Optional.of(Pair.of(lookupAddress, advertisedAddr)); + return Optional.of(lookupAddress); } public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle) { @@ -750,21 +751,42 @@ public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle) { } public CompletableFuture 
unloadNamespaceBundle(NamespaceBundle bundle, Optional destinationBroker) { - if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { - return ExtensibleLoadManagerImpl.get(loadManager.get()) - .unloadNamespaceBundleAsync(bundle, destinationBroker); - } + // unload namespace bundle - return unloadNamespaceBundle(bundle, config.getNamespaceBundleUnloadingTimeoutMs(), TimeUnit.MILLISECONDS); + return unloadNamespaceBundle(bundle, destinationBroker, + config.getNamespaceBundleUnloadingTimeoutMs(), TimeUnit.MILLISECONDS); + } + + public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, + Optional destinationBroker, + long timeout, + TimeUnit timeoutUnit) { + return unloadNamespaceBundle(bundle, destinationBroker, timeout, timeoutUnit, true); + } + + public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, + long timeout, + TimeUnit timeoutUnit) { + return unloadNamespaceBundle(bundle, Optional.empty(), timeout, timeoutUnit, true); } - public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, long timeout, TimeUnit timeoutUnit) { - return unloadNamespaceBundle(bundle, timeout, timeoutUnit, true); + public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, + long timeout, + TimeUnit timeoutUnit, + boolean closeWithoutWaitingClientDisconnect) { + return unloadNamespaceBundle(bundle, Optional.empty(), timeout, + timeoutUnit, closeWithoutWaitingClientDisconnect); } - public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, long timeout, + public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle, + Optional destinationBroker, + long timeout, TimeUnit timeoutUnit, boolean closeWithoutWaitingClientDisconnect) { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { + return ExtensibleLoadManagerImpl.get(loadManager.get()) + .unloadNamespaceBundleAsync(bundle, destinationBroker); + } // unload namespace bundle OwnedBundle ob = ownershipCache.getOwnedBundle(bundle); if (ob == null) { @@ -783,13 +805,23 @@ public CompletableFuture> getOwnedNameSpac .getIsolationDataPoliciesAsync(pulsar.getConfiguration().getClusterName()) .thenApply(nsIsolationPoliciesOpt -> nsIsolationPoliciesOpt.orElseGet(NamespaceIsolationPolicies::new)) .thenCompose(namespaceIsolationPolicies -> { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { + ExtensibleLoadManagerImpl extensibleLoadManager = + ExtensibleLoadManagerImpl.get(loadManager.get()); + var statusMap = extensibleLoadManager.getOwnedServiceUnits().stream() + .collect(Collectors.toMap(NamespaceBundle::toString, + bundle -> getNamespaceOwnershipStatus(true, + namespaceIsolationPolicies.getPolicyByNamespace( + bundle.getNamespaceObject())))); + return CompletableFuture.completedFuture(statusMap); + } Collection> futures = ownershipCache.getOwnedBundlesAsync().values(); return FutureUtil.waitForAll(futures) .thenApply(__ -> futures.stream() .map(CompletableFuture::join) .collect(Collectors.toMap(bundle -> bundle.getNamespaceBundle().toString(), - bundle -> getNamespaceOwnershipStatus(bundle, + bundle -> getNamespaceOwnershipStatus(bundle.isActive(), namespaceIsolationPolicies.getPolicyByNamespace( bundle.getNamespaceBundle().getNamespaceObject())) )) @@ -797,10 +829,10 @@ public CompletableFuture> getOwnedNameSpac }); } - private NamespaceOwnershipStatus getNamespaceOwnershipStatus(OwnedBundle nsObj, + private NamespaceOwnershipStatus getNamespaceOwnershipStatus(boolean isActive, NamespaceIsolationPolicy nsIsolationPolicy) { 
NamespaceOwnershipStatus nsOwnedStatus = new NamespaceOwnershipStatus(BrokerAssignment.shared, false, - nsObj.isActive()); + isActive); if (nsIsolationPolicy == null) { // no matching policy found, this namespace must be an uncontrolled one and using shared broker return nsOwnedStatus; @@ -1095,24 +1127,16 @@ public OwnershipCache getOwnershipCache() { } public Set getOwnedServiceUnits() { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { + ExtensibleLoadManagerImpl extensibleLoadManager = ExtensibleLoadManagerImpl.get(loadManager.get()); + return extensibleLoadManager.getOwnedServiceUnits(); + } return ownershipCache.getOwnedBundles().values().stream().map(OwnedBundle::getNamespaceBundle) .collect(Collectors.toSet()); } public boolean isServiceUnitOwned(ServiceUnitId suName) throws Exception { - if (suName instanceof TopicName) { - return isTopicOwnedAsync((TopicName) suName).get(); - } - - if (suName instanceof NamespaceName) { - return isNamespaceOwned((NamespaceName) suName); - } - - if (suName instanceof NamespaceBundle) { - return ownershipCache.isNamespaceBundleOwned((NamespaceBundle) suName); - } - - throw new IllegalArgumentException("Invalid class of NamespaceBundle: " + suName.getClass().getName()); + return isServiceUnitOwnedAsync(suName).get(config.getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } public CompletableFuture isServiceUnitOwnedAsync(ServiceUnitId suName) { @@ -1137,16 +1161,17 @@ public CompletableFuture isServiceUnitOwnedAsync(ServiceUnitId suName) new IllegalArgumentException("Invalid class of NamespaceBundle: " + suName.getClass().getName())); } + /** + * @Deprecated This method is only used in test now. + */ + @Deprecated public boolean isServiceUnitActive(TopicName topicName) { try { - OwnedBundle ownedBundle = ownershipCache.getOwnedBundle(getBundle(topicName)); - if (ownedBundle == null) { - return false; - } - return ownedBundle.isActive(); - } catch (Exception e) { - LOG.warn("Unable to find OwnedBundle for topic - [{}]", topicName, e); - return false; + return isServiceUnitActiveAsync(topicName).get(pulsar.getConfig() + .getMetadataStoreOperationTimeoutSeconds(), SECONDS); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + LOG.warn("Unable to find OwnedBundle for topic in time - [{}]", topicName, e); + throw new RuntimeException(e); } } @@ -1156,16 +1181,13 @@ public CompletableFuture isServiceUnitActiveAsync(TopicName topicName) return getBundleAsync(topicName) .thenCompose(bundle -> loadManager.get().checkOwnershipAsync(Optional.of(topicName), bundle)); } - Optional> res = ownershipCache.getOwnedBundleAsync(getBundle(topicName)); - if (!res.isPresent()) { - return CompletableFuture.completedFuture(false); - } - - return res.get().thenApply(ob -> ob != null && ob.isActive()); - } - - private boolean isNamespaceOwned(NamespaceName fqnn) throws Exception { - return ownershipCache.getOwnedBundle(getFullBundle(fqnn)) != null; + return getBundleAsync(topicName).thenCompose(bundle -> { + Optional> optionalFuture = ownershipCache.getOwnedBundleAsync(bundle); + if (!optionalFuture.isPresent()) { + return CompletableFuture.completedFuture(false); + } + return optionalFuture.get().thenApply(ob -> ob != null && ob.isActive()); + }); } private CompletableFuture isNamespaceOwnedAsync(NamespaceName fqnn) { @@ -1208,13 +1230,13 @@ public CompletableFuture removeOwnedServiceUnitAsync(NamespaceBundle nsBun return future.thenRun(() -> bundleFactory.invalidateBundleCache(nsBundle.getNamespaceObject())); } - 
protected void onNamespaceBundleOwned(NamespaceBundle bundle) { + public void onNamespaceBundleOwned(NamespaceBundle bundle) { for (NamespaceBundleOwnershipListener bundleOwnedListener : bundleOwnershipListeners) { notifyNamespaceBundleOwnershipListener(bundle, bundleOwnedListener); } } - protected void onNamespaceBundleUnload(NamespaceBundle bundle) { + public void onNamespaceBundleUnload(NamespaceBundle bundle) { for (NamespaceBundleOwnershipListener bundleOwnedListener : bundleOwnershipListeners) { try { if (bundleOwnedListener.test(bundle)) { @@ -1225,7 +1247,6 @@ protected void onNamespaceBundleUnload(NamespaceBundle bundle) { } } } - public void addNamespaceBundleOwnershipListener(NamespaceBundleOwnershipListener... listeners) { Objects.requireNonNull(listeners); for (NamespaceBundleOwnershipListener listener : listeners) { @@ -1392,6 +1413,9 @@ private CompletableFuture> getPartitionsForTopic(TopicName topicNam }); } + /** + * Lists persistent topic names under a namespace; the topic names contain the partition suffix. + */ public CompletableFuture> getListOfPersistentTopics(NamespaceName namespaceName) { return pulsar.getPulsarResources().getTopicResources().listPersistentTopicsAsync(namespaceName); } @@ -1507,10 +1531,6 @@ public boolean checkOwnershipPresent(NamespaceBundle bundle) throws Exception { public CompletableFuture checkOwnershipPresentAsync(NamespaceBundle bundle) { if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { - if (bundle.getNamespaceObject().equals(SYSTEM_NAMESPACE)) { - return FutureUtil.failedFuture(new UnsupportedOperationException( - "Ownership check for system namespace is not supported")); - } ExtensibleLoadManagerImpl extensibleLoadManager = ExtensibleLoadManagerImpl.get(loadManager.get()); return extensibleLoadManager.getOwnershipAsync(Optional.empty(), bundle) .thenApply(Optional::isPresent); @@ -1519,8 +1539,7 @@ public CompletableFuture checkOwnershipPresentAsync(NamespaceBundle bun } public void unloadSLANamespace() throws Exception { - PulsarAdmin adminClient = null; - NamespaceName namespaceName = getSLAMonitorNamespace(host, config); + NamespaceName namespaceName = getSLAMonitorNamespace(pulsar.getBrokerId(), config); LOG.info("Checking owner for SLA namespace {}", namespaceName); @@ -1532,46 +1551,28 @@ public void unloadSLANamespace() throws Exception { } LOG.info("Trying to unload SLA namespace {}", namespaceName); - adminClient = pulsar.getAdminClient(); + PulsarAdmin adminClient = pulsar.getAdminClient(); adminClient.namespaces().unload(namespaceName.toString()); LOG.info("Namespace {} unloaded successfully", namespaceName); } - public static NamespaceName getHeartbeatNamespace(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT, config.getClusterName(), host, port)); + public static NamespaceName getHeartbeatNamespace(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT, config.getClusterName(), lookupBroker)); } - public static NamespaceName getHeartbeatNamespaceV2(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - 
port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT_V2, host, port)); + public static NamespaceName getHeartbeatNamespaceV2(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT_V2, lookupBroker)); } - public static NamespaceName getSLAMonitorNamespace(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(SLA_NAMESPACE_FMT, config.getClusterName(), host, port)); + public static NamespaceName getSLAMonitorNamespace(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(SLA_NAMESPACE_FMT, config.getClusterName(), lookupBroker)); } public static String checkHeartbeatNamespace(ServiceUnitId ns) { Matcher m = HEARTBEAT_NAMESPACE_PATTERN.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { LOG.debug("Heartbeat namespace matched the lookup namespace {}", ns.getNamespaceObject().toString()); - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1581,7 +1582,7 @@ public static String checkHeartbeatNamespaceV2(ServiceUnitId ns) { Matcher m = HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { LOG.debug("Heartbeat namespace v2 matched the lookup namespace {}", ns.getNamespaceObject().toString()); - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1590,7 +1591,7 @@ public static String checkHeartbeatNamespaceV2(ServiceUnitId ns) { public static String getSLAMonitorBrokerName(ServiceUnitId ns) { Matcher m = SLA_NAMESPACE_PATTERN.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1603,6 +1604,17 @@ public static boolean isSystemServiceNamespace(String namespace) { || HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(namespace).matches(); } + /** + * used for filtering bundles in special namespace. 
+ * @param namespace + * @return True if namespace is HEARTBEAT_NAMESPACE or SLA_NAMESPACE + */ + public static boolean isSLAOrHeartbeatNamespace(String namespace) { + return SLA_NAMESPACE_PATTERN.matcher(namespace).matches() + || HEARTBEAT_NAMESPACE_PATTERN.matcher(namespace).matches() + || HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(namespace).matches(); + } + public static boolean isHeartbeatNamespace(ServiceUnitId ns) { String namespace = ns.getNamespaceObject().toString(); return HEARTBEAT_NAMESPACE_PATTERN.matcher(namespace).matches() @@ -1610,14 +1622,16 @@ public static boolean isHeartbeatNamespace(ServiceUnitId ns) { } public boolean registerSLANamespace() throws PulsarServerException { - boolean isNameSpaceRegistered = registerNamespace(getSLAMonitorNamespace(host, config), false); + String brokerId = pulsar.getBrokerId(); + boolean isNameSpaceRegistered = registerNamespace(getSLAMonitorNamespace(brokerId, config), false); if (isNameSpaceRegistered) { if (LOG.isDebugEnabled()) { LOG.debug("Added SLA Monitoring namespace name in local cache: ns={}", - getSLAMonitorNamespace(host, config)); + getSLAMonitorNamespace(brokerId, config)); } } else if (LOG.isDebugEnabled()) { - LOG.debug("SLA Monitoring not owned by the broker: ns={}", getSLAMonitorNamespace(host, config)); + LOG.debug("SLA Monitoring not owned by the broker: ns={}", + getSLAMonitorNamespace(brokerId, config)); } return isNameSpaceRegistered; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java index 7d0b5a4147721..0033abf36c78c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java @@ -122,10 +122,10 @@ public OwnershipCache(PulsarService pulsar, NamespaceBundleFactory bundleFactory this.ownerBrokerUrl = pulsar.getBrokerServiceUrl(); this.ownerBrokerUrlTls = pulsar.getBrokerServiceUrlTls(); this.selfOwnerInfo = new NamespaceEphemeralData(ownerBrokerUrl, ownerBrokerUrlTls, - pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), false, pulsar.getAdvertisedListeners()); this.selfOwnerInfoDisabled = new NamespaceEphemeralData(ownerBrokerUrl, ownerBrokerUrlTls, - pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), true, pulsar.getAdvertisedListeners()); this.lockManager = pulsar.getCoordinationService().getLockManager(NamespaceEphemeralData.class); this.locallyAcquiredLocks = new ConcurrentHashMap<>(); @@ -288,10 +288,10 @@ public Optional> getOwnedBundleAsync(NamespaceBun /** * Disable bundle in local cache and on zk. - * - * @param bundle - * @throws Exception + * @Deprecated This is a dangerous method which is currently only used for test, it will occupy the ZK thread. + * Please switch to your own thread after calling this method. 
*/ + @Deprecated public CompletableFuture disableOwnership(NamespaceBundle bundle) { return updateBundleState(bundle, false) .thenCompose(__ -> { @@ -336,7 +336,7 @@ public Map> getLocallyAcqu public synchronized boolean refreshSelfOwnerInfo() { this.selfOwnerInfo = new NamespaceEphemeralData(pulsar.getBrokerServiceUrl(), - pulsar.getBrokerServiceUrlTls(), pulsar.getSafeWebServiceAddress(), + pulsar.getBrokerServiceUrlTls(), pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), false, pulsar.getAdvertisedListeners()); return selfOwnerInfo.getNativeUrl() != null || selfOwnerInfo.getNativeUrlTls() != null; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java index c86aac5316fb9..432aa29798ebd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java @@ -36,7 +36,7 @@ public final class ServiceUnitUtils { */ private static final String OWNER_INFO_ROOT = "/namespace"; - static String path(NamespaceBundle suname) { + public static String path(NamespaceBundle suname) { // The ephemeral node path for new namespaces should always have bundle name appended return OWNER_INFO_ROOT + "/" + suname.toString(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java index 42a82b2de762b..4059ccf5f26eb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -51,6 +52,9 @@ public class ProtocolHandlers implements AutoCloseable { * @return the collection of protocol handlers */ public static ProtocolHandlers load(ServiceConfiguration conf) throws IOException { + if (conf.getMessagingProtocols().isEmpty()) { + return new ProtocolHandlers(Collections.emptyMap()); + } ProtocolHandlerDefinitions definitions = ProtocolHandlerUtils.searchForHandlers( conf.getProtocolHandlerDirectory(), conf.getNarExtractionDirectory()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java index 85e00bb2f87dc..0377ec86488d4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java @@ -135,7 +135,7 @@ public void update(long publishRateInMsgs, long publishRateInBytes) { public boolean tryAcquire(int numbers, long bytes) { return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) - && (publishRateLimiterOnByte == null || publishRateLimiterOnByte.tryAcquire(bytes)); + && (publishRateLimiterOnByte == null || publishRateLimiterOnByte.tryAcquire(bytes)); } public void registerRateLimitFunction(String name, RateLimitFunction func) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/RestMessagePublishContext.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/RestMessagePublishContext.java index d7cee8b600b31..3c9adbd3e4fe4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/RestMessagePublishContext.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/RestMessagePublishContext.java @@ -53,7 +53,7 @@ public void completed(Exception exception, long ledgerId, long entryId) { + "triggered send callback.", topic.getName(), ledgerId, entryId); } - topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.MICROSECONDS); + topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.NANOSECONDS); positionFuture.complete(PositionImpl.get(ledgerId, entryId)); } recycle(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java index 7d3aa37fa4ec5..075c6227b7357 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java @@ -53,6 +53,7 @@ import org.apache.avro.io.DecoderFactory; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.admin.impl.PersistentTopicsBase; import org.apache.pulsar.broker.authentication.AuthenticationParameters; @@ -432,7 +433,10 @@ private CompletableFuture lookUpBrokerForTopic(TopicName partitionedTopicN } LookupResult result = optionalResult.get(); - if (result.getLookupData().getHttpUrl().equals(pulsar().getWebServiceAddress())) { + String httpUrl = result.getLookupData().getHttpUrl(); + String httpUrlTls = result.getLookupData().getHttpUrlTls(); + if ((StringUtils.isNotBlank(httpUrl) && httpUrl.equals(pulsar().getWebServiceAddress())) + || (StringUtils.isNotBlank(httpUrlTls) && httpUrlTls.equals(pulsar().getWebServiceAddressTls()))) { // Current broker owns the topic, add to owning topic. if (log.isDebugEnabled()) { log.debug("Complete topic look up for rest produce message request for topic {}, " @@ -453,12 +457,10 @@ private CompletableFuture lookUpBrokerForTopic(TopicName partitionedTopicN } if (result.isRedirect()) { // Redirect lookup. - completeLookup(Pair.of(Arrays.asList(result.getLookupData().getHttpUrl(), - result.getLookupData().getHttpUrlTls()), false), redirectAddresses, future); + completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), false), redirectAddresses, future); } else { // Found owner for topic. 
- completeLookup(Pair.of(Arrays.asList(result.getLookupData().getHttpUrl(), - result.getLookupData().getHttpUrlTls()), true), redirectAddresses, future); + completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), true), redirectAddresses, future); } } }).exceptionally(exception -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java index 8f6caa7a20801..eb747a6535b99 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java @@ -49,6 +49,7 @@ import org.apache.pulsar.common.api.proto.ReplicatedSubscriptionsSnapshot; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Markers; +import org.apache.pulsar.compaction.Compactor; import org.checkerframework.checker.nullness.qual.Nullable; @Slf4j @@ -171,37 +172,49 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i entry.release(); continue; } - if (!isReplayRead && msgMetadata != null && msgMetadata.hasTxnidMostBits() + if (msgMetadata != null && msgMetadata.hasTxnidMostBits() && msgMetadata.hasTxnidLeastBits()) { if (Markers.isTxnMarker(msgMetadata)) { - // because consumer can receive message is smaller than maxReadPosition, - // so this marker is useless for this subscription - individualAcknowledgeMessageIfNeeded(entry.getPosition(), Collections.emptyMap()); - entries.set(i, null); - entry.release(); - continue; + if (cursor == null || !cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION)) { + // because consumer can receive message is smaller than maxReadPosition, + // so this marker is useless for this subscription + individualAcknowledgeMessageIfNeeded(Collections.singletonList(entry.getPosition()), + Collections.emptyMap()); + entries.set(i, null); + entry.release(); + continue; + } } else if (((PersistentTopic) subscription.getTopic()) .isTxnAborted(new TxnID(msgMetadata.getTxnidMostBits(), msgMetadata.getTxnidLeastBits()), (PositionImpl) entry.getPosition())) { - individualAcknowledgeMessageIfNeeded(entry.getPosition(), Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(Collections.singletonList(entry.getPosition()), + Collections.emptyMap()); entries.set(i, null); entry.release(); continue; } } - if (msgMetadata == null || Markers.isServerOnlyMarker(msgMetadata)) { + if (msgMetadata == null || (Markers.isServerOnlyMarker(msgMetadata))) { PositionImpl pos = (PositionImpl) entry.getPosition(); // Message metadata was corrupted or the messages was a server-only marker if (Markers.isReplicatedSubscriptionSnapshotMarker(msgMetadata)) { + final int readerIndex = metadataAndPayload.readerIndex(); processReplicatedSubscriptionSnapshot(pos, metadataAndPayload); + metadataAndPayload.readerIndex(readerIndex); } - entries.set(i, null); - entry.release(); - individualAcknowledgeMessageIfNeeded(pos, Collections.emptyMap()); - continue; + // Deliver markers to the __compaction cursor to avoid the compaction task getting stuck, + // and filter them out when doing topic compaction. 
+ if (msgMetadata == null || cursor == null + || !cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION)) { + entries.set(i, null); + entry.release(); + individualAcknowledgeMessageIfNeeded(Collections.singletonList(pos), + Collections.emptyMap()); + continue; + } } else if (trackDelayedDelivery(entry.getLedgerId(), entry.getEntryId(), msgMetadata)) { // The message is marked for delayed delivery. Ignore for now. entries.set(i, null); @@ -213,12 +226,7 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i this.filterAcceptedMsgs.add(entryMsgCnt); } - totalEntries++; int batchSize = msgMetadata.getNumMessagesInBatch(); - totalMessages += batchSize; - totalBytes += metadataAndPayload.readableBytes(); - totalChunkedMessages += msgMetadata.hasChunkId() ? 1 : 0; - batchSizes.setBatchSize(i, batchSize); long[] ackSet = null; if (indexesAcks != null && cursor != null) { PositionImpl position = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); @@ -262,6 +270,12 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i } } + totalEntries++; + totalMessages += batchSize; + totalBytes += metadataAndPayload.readableBytes(); + totalChunkedMessages += msgMetadata.hasChunkId() ? 1 : 0; + batchSizes.setBatchSize(i, batchSize); + BrokerInterceptor interceptor = subscription.interceptor(); if (null != interceptor) { // keep for compatibility if users has implemented the old interface @@ -270,8 +284,7 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i } } if (CollectionUtils.isNotEmpty(entriesToFiltered)) { - subscription.acknowledgeMessage(entriesToFiltered, AckType.Individual, - Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(entriesToFiltered, Collections.emptyMap()); int filtered = entriesToFiltered.size(); Topic topic = subscription.getTopic(); @@ -300,9 +313,9 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i return totalEntries; } - private void individualAcknowledgeMessageIfNeeded(Position position, Map properties) { + private void individualAcknowledgeMessageIfNeeded(List positions, Map properties) { if (!(subscription instanceof CompactorSubscription)) { - subscription.acknowledgeMessage(Collections.singletonList(position), AckType.Individual, properties); + subscription.acknowledgeMessage(positions, AckType.Individual, properties); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java index 5098890242b6c..310354dcd3b47 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java @@ -166,7 +166,22 @@ public synchronized CompletableFuture addConsumer(Consumer consumer) { } if (subscriptionType == SubType.Exclusive && !consumers.isEmpty()) { - return FutureUtil.failedFuture(new ConsumerBusyException("Exclusive consumer is already connected")); + Consumer actConsumer = ACTIVE_CONSUMER_UPDATER.get(this); + if (actConsumer != null) { + return actConsumer.cnx().checkConnectionLiveness().thenCompose(actConsumerStillAlive -> { + if (actConsumerStillAlive == null || actConsumerStillAlive) { + return FutureUtil.failedFuture(new ConsumerBusyException("Exclusive consumer is already" + + " connected")); + } else { + return 
addConsumer(consumer); + } + }); + } else { + // It should never happen. + + return FutureUtil.failedFuture(new ConsumerBusyException("Active consumer is in a strange state." + + " Active consumer is null, but there are " + consumers.size() + " registered.")); + } } if (subscriptionType == SubType.Failover && isConsumersExceededOnSubscription()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java index deab89cda72dc..6d0dc2dc9e158 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; @@ -47,6 +48,7 @@ public abstract class AbstractReplicator { protected final PulsarClientImpl replicationClient; protected final PulsarClientImpl client; protected String replicatorId; + protected final Topic localTopic; protected volatile ProducerImpl producer; public static final String REPL_PRODUCER_NAME_DELIMITER = "-->"; @@ -67,11 +69,12 @@ protected enum State { Stopped, Starting, Started, Stopping } - public AbstractReplicator(String localCluster, String localTopicName, String remoteCluster, String remoteTopicName, + public AbstractReplicator(String localCluster, Topic localTopic, String remoteCluster, String remoteTopicName, String replicatorPrefix, BrokerService brokerService, PulsarClientImpl replicationClient) throws PulsarServerException { this.brokerService = brokerService; - this.localTopicName = localTopicName; + this.localTopic = localTopic; + this.localTopicName = localTopic.getName(); this.replicatorPrefix = replicatorPrefix; this.localCluster = localCluster.intern(); this.remoteTopicName = remoteTopicName; @@ -120,7 +123,8 @@ public synchronized void startProducer() { replicatorId, waitTimeMs / 1000.0); } // BackOff before retrying - brokerService.executor().schedule(this::startProducer, waitTimeMs, TimeUnit.MILLISECONDS); + brokerService.executor().schedule(this::checkTopicActiveAndRetryStartProducer, waitTimeMs, + TimeUnit.MILLISECONDS); return; } State state = STATE_UPDATER.get(this); @@ -147,7 +151,8 @@ public synchronized void startProducer() { replicatorId, ex.getMessage(), waitTimeMs / 1000.0); // BackOff before retrying - brokerService.executor().schedule(this::startProducer, waitTimeMs, TimeUnit.MILLISECONDS); + brokerService.executor().schedule(this::checkTopicActiveAndRetryStartProducer, waitTimeMs, + TimeUnit.MILLISECONDS); } else { log.warn("[{}] Failed to create remote producer. Replicator state: {}", replicatorId, STATE_UPDATER.get(this), ex); @@ -157,13 +162,38 @@ public synchronized void startProducer() { } + protected void checkTopicActiveAndRetryStartProducer() { + isLocalTopicActive().thenAccept(isTopicActive -> { + if (isTopicActive) { + startProducer(); + } + }).exceptionally(ex -> { + log.warn("[{}] Stop retry to create producer due to topic load fail. 
Replicator state: {}", replicatorId, + STATE_UPDATER.get(this), ex); + return null; + }); + } + + protected CompletableFuture isLocalTopicActive() { + CompletableFuture> topicFuture = brokerService.getTopics().get(localTopicName); + if (topicFuture == null){ + return CompletableFuture.completedFuture(false); + } + return topicFuture.thenApplyAsync(optional -> { + if (optional.isEmpty()) { + return false; + } + return optional.get() == localTopic; + }, brokerService.executor()); + } + protected synchronized CompletableFuture closeProducerAsync() { if (producer == null) { STATE_UPDATER.set(this, State.Stopped); return CompletableFuture.completedFuture(null); } CompletableFuture future = producer.closeAsync(); - future.thenRun(() -> { + return future.thenRun(() -> { STATE_UPDATER.set(this, State.Stopped); this.producer = null; // deactivate further read @@ -178,7 +208,6 @@ protected synchronized CompletableFuture closeProducerAsync() { brokerService.executor().schedule(this::closeProducerAsync, waitTimeMs, TimeUnit.MILLISECONDS); return null; }); - return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java index 6245ce19eebc6..507a5af5270e8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service; import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; import static org.apache.bookkeeper.mledger.impl.ManagedLedgerMBeanImpl.ENTRY_LATENCY_BUCKETS_USEC; import com.google.common.base.MoreObjects; import java.util.ArrayList; @@ -35,10 +36,12 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.ToLongFunction; +import javax.annotation.Nonnull; import lombok.Getter; import org.apache.bookkeeper.mledger.util.StatsBuckets; import org.apache.commons.collections4.CollectionUtils; @@ -55,11 +58,11 @@ import org.apache.pulsar.broker.service.BrokerServiceException.TopicMigratedException; import org.apache.pulsar.broker.service.BrokerServiceException.TopicTerminatedException; import org.apache.pulsar.broker.service.plugin.EntryFilter; -import org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorage; import org.apache.pulsar.broker.service.schema.SchemaRegistryService; import org.apache.pulsar.broker.service.schema.exceptions.IncompatibleSchemaException; import org.apache.pulsar.broker.stats.prometheus.metrics.Summary; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.ClusterData.ClusterUrl; @@ -115,7 +118,7 @@ public abstract class AbstractTopic implements Topic, TopicPolicyListener RATE_LIMITED_UPDATER = AtomicLongFieldUpdater.newUpdater(AbstractTopic.class, "publishRateLimitedTimes"); - protected volatile long publishRateLimitedTimes = 0; + protected volatile long 
publishRateLimitedTimes = 0L; + + private static final AtomicIntegerFieldUpdater USER_CREATED_PRODUCER_COUNTER_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(AbstractTopic.class, "userCreatedProducerCount"); + protected volatile int userCreatedProducerCount = 0; protected volatile Optional topicEpoch = Optional.empty(); private volatile boolean hasExclusiveProducer; @@ -447,14 +454,8 @@ protected boolean isProducersExceeded(Producer producer) { return false; } Integer maxProducers = topicPolicies.getMaxProducersPerTopic().get(); - if (maxProducers != null && maxProducers > 0 && maxProducers <= getUserCreatedProducersSize()) { - return true; - } - return false; - } - - private long getUserCreatedProducersSize() { - return producers.values().stream().filter(p -> !p.isRemote()).count(); + return maxProducers != null && maxProducers > 0 + && maxProducers <= USER_CREATED_PRODUCER_COUNTER_UPDATER.get(this); } protected void registerTopicPolicyListener() { @@ -672,21 +673,7 @@ private boolean allowAutoUpdateSchema() { @Override public CompletableFuture deleteSchema() { - String id = getSchemaId(); - SchemaRegistryService schemaRegistryService = brokerService.pulsar().getSchemaRegistryService(); - return BookkeeperSchemaStorage.ignoreUnrecoverableBKException(schemaRegistryService.getSchema(id)) - .thenCompose(schema -> { - if (schema != null) { - // It's different from `SchemasResource.deleteSchema` - // because when we delete a topic, the schema - // history is meaningless. But when we delete a schema of a topic, a new schema could be - // registered in the future. - log.info("Delete schema storage of id: {}", id); - return schemaRegistryService.deleteSchemaStorage(id); - } else { - return CompletableFuture.completedFuture(null); - } - }); + return brokerService.deleteSchema(TopicName.get(getName())); } @Override @@ -716,15 +703,14 @@ public CompletableFuture> addProducer(Producer producer, log.warn("[{}] Attempting to add producer to a terminated topic", topic); throw new TopicTerminatedException("Topic was already terminated"); } - internalAddProducer(producer); - - USAGE_COUNT_UPDATER.incrementAndGet(this); - if (log.isDebugEnabled()) { - log.debug("[{}] [{}] Added producer -- count: {}", topic, producer.getProducerName(), - USAGE_COUNT_UPDATER.get(this)); - } - - return CompletableFuture.completedFuture(producerEpoch); + return internalAddProducer(producer).thenApply(ignore -> { + USAGE_COUNT_UPDATER.incrementAndGet(this); + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Added producer -- count: {}", topic, producer.getProducerName(), + USAGE_COUNT_UPDATER.get(this)); + } + return producerEpoch; + }); } catch (BrokerServiceException e) { return FutureUtil.failedFuture(e); } finally { @@ -970,15 +956,17 @@ protected void checkTopicFenced() throws BrokerServiceException { } } - protected void internalAddProducer(Producer producer) throws BrokerServiceException { + protected CompletableFuture internalAddProducer(Producer producer) { if (isProducersExceeded(producer)) { log.warn("[{}] Attempting to add producer to topic which reached max producers limit", topic); - throw new BrokerServiceException.ProducerBusyException("Topic reached max producers limit"); + return CompletableFuture.failedFuture( + new BrokerServiceException.ProducerBusyException("Topic reached max producers limit")); } if (isSameAddressProducersExceeded(producer)) { log.warn("[{}] Attempting to add producer to topic which reached max same address producers limit", topic); - throw new 
BrokerServiceException.ProducerBusyException("Topic reached max same address producers limit"); + return CompletableFuture.failedFuture( + new BrokerServiceException.ProducerBusyException("Topic reached max same address producers limit")); } if (log.isDebugEnabled()) { @@ -987,25 +975,46 @@ protected void internalAddProducer(Producer producer) throws BrokerServiceExcept Producer existProducer = producers.putIfAbsent(producer.getProducerName(), producer); if (existProducer != null) { - tryOverwriteOldProducer(existProducer, producer); + return tryOverwriteOldProducer(existProducer, producer); + } else if (!producer.isRemote()) { + USER_CREATED_PRODUCER_COUNTER_UPDATER.incrementAndGet(this); } + return CompletableFuture.completedFuture(null); } - private void tryOverwriteOldProducer(Producer oldProducer, Producer newProducer) - throws BrokerServiceException { - if (newProducer.isSuccessorTo(oldProducer) && !isUserProvidedProducerName(oldProducer) - && !isUserProvidedProducerName(newProducer)) { + private CompletableFuture tryOverwriteOldProducer(Producer oldProducer, Producer newProducer) { + if (newProducer.isSuccessorTo(oldProducer)) { oldProducer.close(false); if (!producers.replace(newProducer.getProducerName(), oldProducer, newProducer)) { // Met concurrent update, throw exception here so that client can try reconnect later. - throw new BrokerServiceException.NamingException("Producer with name '" + newProducer.getProducerName() - + "' replace concurrency error"); + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException("Producer with name '" + + newProducer.getProducerName() + "' replace concurrency error")); } else { handleProducerRemoved(oldProducer); + return CompletableFuture.completedFuture(null); } } else { - throw new BrokerServiceException.NamingException( - "Producer with name '" + newProducer.getProducerName() + "' is already connected to topic"); + // If a producer with the same name tries to use a new connection, asynchronously check whether the old connection is + // still available. Producers related to a connection that is not available are automatically cleaned up. + if (!Objects.equals(oldProducer.getCnx(), newProducer.getCnx())) { + return oldProducer.getCnx().checkConnectionLiveness().thenCompose(previousIsActive -> { + if (previousIsActive) { + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException( + "Producer with name '" + newProducer.getProducerName() + + "' is already connected to topic")); + } else { + // If the connection of the previous producer is not active, the method + // "cnx().checkConnectionLiveness()" will trigger its close and kick out the previous + // producer. So try to add the current producer again. + // The recursive call will be stopped by these two cases (this prevents an infinite call chain): + // 1. adding the current producer succeeds. + // 2. another producer with the same name has been registered. 
+ return internalAddProducer(newProducer); + } + }); + } + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException( + "Producer with name '" + newProducer.getProducerName() + "' is already connected to topic")); } } @@ -1020,6 +1029,9 @@ public void removeProducer(Producer producer) { checkArgument(producer.getTopic() == this); if (producers.remove(producer.getProducerName(), producer)) { + if (!producer.isRemote()) { + USER_CREATED_PRODUCER_COUNTER_UPDATER.decrementAndGet(this); + } handleProducerRemoved(producer); } } @@ -1124,6 +1136,12 @@ public PublishRateLimiter getBrokerPublishRateLimiter() { return brokerService.getBrokerPublishRateLimiter(); } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and we can use + * #{@link AbstractTopic#updateResourceGroupLimiter(Policies)} to instead of it. + */ + @Deprecated public void updateResourceGroupLimiter(Optional optPolicies) { Policies policies; try { @@ -1137,17 +1155,20 @@ public void updateResourceGroupLimiter(Optional optPolicies) { log.warn("[{}] Error getting policies {} and publish throttling will be disabled", topic, e.getMessage()); policies = new Policies(); } + updateResourceGroupLimiter(policies); + } + public void updateResourceGroupLimiter(@Nonnull Policies namespacePolicies) { + requireNonNull(namespacePolicies); // attach the resource-group level rate limiters, if set - String rgName = policies.resource_group_name; + String rgName = namespacePolicies.resource_group_name; if (rgName != null) { final ResourceGroup resourceGroup = - brokerService.getPulsar().getResourceGroupServiceManager().resourceGroupGet(rgName); + brokerService.getPulsar().getResourceGroupServiceManager().resourceGroupGet(rgName); if (resourceGroup != null) { this.resourceGroupRateLimitingEnabled = true; this.resourceGroupPublishLimiter = resourceGroup.getResourceGroupPublishLimiter(); - this.resourceGroupPublishLimiter.registerRateLimitFunction(this.getName(), - () -> this.enableCnxAutoRead()); + this.resourceGroupPublishLimiter.registerRateLimitFunction(this.getName(), this::enableCnxAutoRead); log.info("Using resource group {} rate limiter for topic {}", rgName, topic); return; } @@ -1163,6 +1184,10 @@ public void updateResourceGroupLimiter(Optional optPolicies) { } public void updateEntryFilters() { + if (isSystemTopic()) { + entryFilters = Pair.of(null, Collections.emptyList()); + return; + } final EntryFilters entryFiltersPolicy = getEntryFiltersPolicy(); if (entryFiltersPolicy == null || StringUtils.isBlank(entryFiltersPolicy.getEntryFilterNames())) { entryFilters = Pair.of(null, Collections.emptyList()); @@ -1261,7 +1286,7 @@ protected boolean isExceedMaximumMessageSize(int size, PublishContext publishCon /** * update topic publish dispatcher for this topic. 
*/ - public void updatePublishDispatcher() { + public void updatePublishRateLimiter() { synchronized (topicPublishRateLimiterLock) { PublishRate publishRate = topicPolicies.getPublishRate().get(); if (publishRate.publishThrottlingRateInByte > 0 || publishRate.publishThrottlingRateInMsg > 0) { @@ -1274,7 +1299,7 @@ public void updatePublishDispatcher() { || this.topicPublishRateLimiter == PublishRateLimiter.DISABLED_RATE_LIMITER) { // create new rateLimiter if rate-limiter is disabled if (preciseTopicPublishRateLimitingEnable) { - this.topicPublishRateLimiter = new PrecisPublishLimiter(publishRate, + this.topicPublishRateLimiter = new PrecisePublishLimiter(publishRate, () -> this.enableCnxAutoRead(), brokerService.pulsar().getExecutor()); } else { this.topicPublishRateLimiter = new PublishRateLimiterImpl(publishRate); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java index bc2541c802e63..6ad1697adfc39 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java @@ -103,7 +103,10 @@ public void handleExceededBacklogQuota(PersistentTopic persistentTopic, BacklogQ break; case producer_exception: case producer_request_hold: - disconnectProducers(persistentTopic); + if (!advanceSlowestSystemCursor(persistentTopic)) { + // The slowest is not a system cursor. Disconnecting producers to put backpressure. + disconnectProducers(persistentTopic); + } break; default: break; @@ -268,4 +271,27 @@ private void disconnectProducers(PersistentTopic persistentTopic) { }); } + + /** + * Advances the slowest cursor if that is a system cursor. 
+ * + * @param persistentTopic + * @return true if the slowest cursor is a system cursor + */ + private boolean advanceSlowestSystemCursor(PersistentTopic persistentTopic) { + + ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + ManagedCursor slowestConsumer = mLedger.getSlowestConsumer(); + if (slowestConsumer == null) { + return false; + } + + if (PersistentTopic.isDedupCursorName(slowestConsumer.getName())) { + persistentTopic.getMessageDeduplication().takeSnapshot(); + return true; + } + + // We may need to check other system cursors here : replicator, compaction + return false; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index bb08734298f04..fbec9bf413a50 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service; import static com.google.common.base.Preconditions.checkArgument; +import static java.util.Objects.requireNonNull; import static org.apache.bookkeeper.mledger.ManagedLedgerConfig.PROPERTY_SOURCE_TOPIC_KEY; import static org.apache.commons.collections4.CollectionUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotBlank; @@ -53,6 +54,7 @@ import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.RejectedExecutionException; @@ -68,7 +70,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.Predicate; -import javax.ws.rs.core.Response; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; import lombok.AccessLevel; import lombok.AllArgsConstructor; import lombok.Getter; @@ -100,6 +103,7 @@ import org.apache.pulsar.broker.intercept.BrokerInterceptor; import org.apache.pulsar.broker.intercept.ManagedLedgerInterceptorImpl; import org.apache.pulsar.broker.loadbalance.LoadManager; +import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.resources.DynamicConfigurationResources; import org.apache.pulsar.broker.resources.LocalPoliciesResources; @@ -157,12 +161,12 @@ import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; +import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.util.FieldParser; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.GracefulExecutorServicesShutdown; import org.apache.pulsar.common.util.RateLimiter; -import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashSet; import org.apache.pulsar.common.util.netty.ChannelFutures; @@ -373,8 +377,8 @@ public BrokerService(PulsarService pulsar, EventLoopGroup eventLoopGroup) throws if (pulsar.getConfiguration().getMaxUnackedMessagesPerBroker() > 0 && 
pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked() > 0.0) { this.maxUnackedMessages = pulsar.getConfiguration().getMaxUnackedMessagesPerBroker(); - this.maxUnackedMsgsPerDispatcher = (int) ((maxUnackedMessages - * pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked()) / 100); + this.maxUnackedMsgsPerDispatcher = (int) (maxUnackedMessages + * pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked()); log.info("Enabling per-broker unack-message limit {} and dispatcher-limit {} on blocked-broker", maxUnackedMessages, maxUnackedMsgsPerDispatcher); // block misbehaving dispatcher by checking periodically @@ -821,6 +825,7 @@ public CompletableFuture closeAsync() { for (EventLoopGroup group : protocolHandlersWorkerGroups) { shutdownEventLoops.add(shutdownEventLoopGracefully(group)); } + CompletableFuture shutdownFuture = CompletableFuture.allOf(shutdownEventLoops.toArray(new CompletableFuture[0])) .handle((v, t) -> { @@ -831,7 +836,7 @@ public CompletableFuture closeAsync() { } return null; }) - .thenCompose(__ -> { + .thenComposeAsync(__ -> { log.info("Continuing to second phase in shutdown."); List> asyncCloseFutures = new ArrayList<>(); @@ -895,6 +900,12 @@ public CompletableFuture closeAsync() { return null; }); return combined; + }, runnable -> { + // run the 2nd phase of the shutdown in a separate thread + Thread thread = new Thread(runnable); + thread.setName("BrokerService-shutdown-phase2"); + thread.setDaemon(false); + thread.start(); }); FutureUtil.whenCancelledOrTimedOut(shutdownFuture, () -> cancellableDownstreamFutureReference .thenAccept(future -> future.cancel(false))); @@ -1040,19 +1051,50 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } final boolean isPersistentTopic = topicName.getDomain().equals(TopicDomain.persistent); if (isPersistentTopic) { - return topics.computeIfAbsent(topicName.toString(), (tpName) -> { - if (topicName.isPartitioned()) { - return fetchPartitionedTopicMetadataAsync(TopicName.get(topicName.getPartitionedTopicName())) - .thenCompose((metadata) -> { - // Allow crate non-partitioned persistent topic that name includes `partition` - if (metadata.partitions == 0 - || topicName.getPartitionIndex() < metadata.partitions) { - return loadOrCreatePersistentTopic(tpName, createIfMissing, properties); - } - return CompletableFuture.completedFuture(Optional.empty()); - }); + return pulsar.getPulsarResources().getTopicResources().persistentTopicExists(topicName) + .thenCompose(exists -> { + if (!exists && !createIfMissing) { + return CompletableFuture.completedFuture(Optional.empty()); } - return loadOrCreatePersistentTopic(tpName, createIfMissing, properties); + return getTopicPoliciesBypassSystemTopic(topicName).exceptionally(ex -> { + final Throwable rc = FutureUtil.unwrapCompletionException(ex); + final String errorInfo = String.format("Topic creation encountered an exception by initialize" + + " topic policies service. 
topic_name=%s error_message=%s", topicName, + rc.getMessage()); + log.error(errorInfo, rc); + throw FutureUtil.wrapToCompletionException(new ServiceUnitNotReadyException(errorInfo)); + }).thenCompose(optionalTopicPolicies -> { + final TopicPolicies topicPolicies = optionalTopicPolicies.orElse(null); + return topics.computeIfAbsent(topicName.toString(), (tpName) -> { + if (topicName.isPartitioned()) { + final TopicName topicNameEntity = TopicName.get(topicName.getPartitionedTopicName()); + return fetchPartitionedTopicMetadataAsync(topicNameEntity) + .thenCompose((metadata) -> { + // Allow creating a non-partitioned persistent topic whose name includes + // `partition` + if (metadata.partitions == 0 + || topicName.getPartitionIndex() < metadata.partitions) { + return loadOrCreatePersistentTopic(tpName, createIfMissing, + properties, topicPolicies); + } + final String errorMsg = + String.format("Illegal topic partition name %s with max allowed " + + "%d partitions", topicName, metadata.partitions); + log.warn(errorMsg); + return FutureUtil.failedFuture( + new BrokerServiceException.NotAllowedException(errorMsg)); + }); + } + return loadOrCreatePersistentTopic(tpName, createIfMissing, properties, topicPolicies); + }).thenCompose(optionalTopic -> { + if (!optionalTopic.isPresent() && createIfMissing) { + log.warn("[{}] Try to recreate the topic with createIfMissing=true " + + "but the returned topic is empty", topicName); + return getTopic(topicName, createIfMissing, properties); + } + return CompletableFuture.completedFuture(optionalTopic); + }); + }); }); } else { return topics.computeIfAbsent(topicName.toString(), (name) -> { @@ -1106,6 +1148,18 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } } + private CompletableFuture> getTopicPoliciesBypassSystemTopic(@Nonnull TopicName topicName) { + Objects.requireNonNull(topicName); + final ServiceConfiguration serviceConfiguration = pulsar.getConfiguration(); + if (serviceConfiguration.isSystemTopicEnabled() && serviceConfiguration.isTopicLevelPoliciesEnabled() + && !NamespaceService.isSystemServiceNamespace(topicName.getNamespace()) + && !SystemTopicNames.isTopicPoliciesSystemTopic(topicName.toString())) { + return pulsar.getTopicPoliciesService().getTopicPoliciesAsync(topicName); + } else { + return CompletableFuture.completedFuture(Optional.empty()); + } + } + public CompletableFuture deleteTopic(String topic, boolean forceDelete) { topicEventsDispatcher.notify(topic, TopicEvent.DELETE, EventStage.BEFORE); CompletableFuture result = deleteTopicInternal(topic, forceDelete); @@ -1155,26 +1209,33 @@ private CompletableFuture deleteTopicInternal(String topic, boolean forceD CompletableFuture future = new CompletableFuture<>(); CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); - - deleteTopicAuthenticationFuture.whenComplete((v, ex) -> { + deleteTopicAuthenticationFuture + .thenCompose(__ -> deleteSchema(tn)) + .thenCompose(__ -> { + if (!SystemTopicNames.isTopicPoliciesSystemTopic(topic) + && getPulsar().getConfiguration().isSystemTopicEnabled()) { + return deleteTopicPolicies(tn); + } + return CompletableFuture.completedFuture(null); + }).whenComplete((v, ex) -> { if (ex != null) { future.completeExceptionally(ex); return; } CompletableFuture mlConfigFuture = getManagedLedgerConfig(topicName); managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), - mlConfigFuture, new DeleteLedgerCallback() { - @Override - public 
void deleteLedgerComplete(Object ctx) { - future.complete(null); - } + mlConfigFuture, new DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + future.complete(null); + } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - future.completeExceptionally(exception); - } - }, null); - }); + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + future.completeExceptionally(exception); + } + }, null); + }); return future; } @@ -1503,7 +1564,7 @@ public PulsarAdmin getClusterPulsarAdmin(String cluster, Optional c * @throws RuntimeException */ protected CompletableFuture> loadOrCreatePersistentTopic(final String topic, - boolean createIfMissing, Map properties) throws RuntimeException { + boolean createIfMissing, Map properties, @Nullable TopicPolicies topicPolicies) { final CompletableFuture> topicFuture = FutureUtil.createFutureWithTimeout( Duration.ofSeconds(pulsar.getConfiguration().getTopicLoadTimeoutSeconds()), executor(), () -> FAILED_TO_LOAD_TOPIC_TIMEOUT_EXCEPTION); @@ -1521,7 +1582,8 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); if (topicLoadSemaphore.tryAcquire()) { - checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture, properties); + checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture, + properties, topicPolicies); topicFuture.handle((persistentTopic, ex) -> { // release permit and process pending topic topicLoadSemaphore.release(); @@ -1529,7 +1591,8 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S return null; }); } else { - pendingTopicLoadingQueue.add(new TopicLoadingContext(topic, topicFuture, properties)); + pendingTopicLoadingQueue.add(new TopicLoadingContext(topic, + topicFuture, properties, topicPolicies)); if (log.isDebugEnabled()) { log.debug("topic-loading for {} added into pending queue", topic); } @@ -1570,7 +1633,7 @@ protected CompletableFuture> fetchTopicPropertiesAsync(Topic private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, - Map properties) { + Map properties, @Nullable TopicPolicies topicPolicies) { TopicName topicName = TopicName.get(topic); pulsar.getNamespaceService().isServiceUnitActiveAsync(topicName) .thenAccept(isActive -> { @@ -1584,7 +1647,8 @@ private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean } propertiesFuture.thenAccept(finalProperties -> //TODO add topicName in properties? 
- createPersistentTopic(topic, createIfMissing, topicFuture, finalProperties) + createPersistentTopic(topic, createIfMissing, topicFuture, + finalProperties, topicPolicies) ).exceptionally(throwable -> { log.warn("[{}] Read topic property failed", topic, throwable); pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); @@ -1609,12 +1673,12 @@ private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean public void createPersistentTopic0(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, Map properties) { - createPersistentTopic(topic, createIfMissing, topicFuture, properties); + createPersistentTopic(topic, createIfMissing, topicFuture, properties, null); } private void createPersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, - Map properties) { + Map properties, @Nullable TopicPolicies topicPolicies) { TopicName topicName = TopicName.get(topic); final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); @@ -1630,7 +1694,8 @@ private void createPersistentTopic(final String topic, boolean createIfMissing, ? checkMaxTopicsPerNamespace(topicName, 1) : CompletableFuture.completedFuture(null); - maxTopicsCheck.thenCompose(__ -> getManagedLedgerConfig(topicName)).thenAccept(managedLedgerConfig -> { + maxTopicsCheck.thenCompose(__ -> + getManagedLedgerConfig(topicName, topicPolicies)).thenAccept(managedLedgerConfig -> { if (isBrokerEntryMetadataEnabled() || isBrokerPayloadProcessorEnabled()) { // init managedLedger interceptor Set interceptors = new HashSet<>(); @@ -1697,16 +1762,14 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { // Check create persistent topic timeout. log.warn("{} future is already completed with failure {}, closing the" + " topic", topic, FutureUtil.getException(topicFuture)); - persistentTopic.getTransactionBuffer() - .closeAsync() - .exceptionally(t -> { - log.error("[{}] Close transactionBuffer failed", topic, t); - return null; - }); - persistentTopic.stopReplProducers() - .whenCompleteAsync((v, exception) -> { - topics.remove(topic, topicFuture); - }, executor()); + executor().submit(() -> { + persistentTopic.close().whenComplete((ignore, ex) -> { + if (ex != null) { + log.warn("[{}] Get an error when closing topic.", + topic, ex); + } + }); + }); } else { addTopicToStatsMaps(topicName, persistentTopic); topicFuture.complete(Optional.of(persistentTopic)); @@ -1715,16 +1778,15 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { .exceptionally((ex) -> { log.warn("Replication or dedup check failed." 
+ " Removing topic from topics list {}, {}", topic, ex); - persistentTopic.getTransactionBuffer() - .closeAsync() - .exceptionally(t -> { - log.error("[{}] Close transactionBuffer failed", topic, t); - return null; - }); - persistentTopic.stopReplProducers().whenCompleteAsync((v, exception) -> { - topics.remove(topic, topicFuture); - topicFuture.completeExceptionally(ex); - }, executor()); + executor().submit(() -> { + persistentTopic.close().whenComplete((ignore, closeEx) -> { + if (closeEx != null) { + log.warn("[{}] Get an error when closing topic.", + topic, closeEx); + } + topicFuture.completeExceptionally(ex); + }); + }); return null; }); } catch (PulsarServerException e) { @@ -1746,7 +1808,7 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { topicFuture.completeExceptionally(new PersistenceException(exception)); } } - }, () -> isTopicNsOwnedByBroker(topicName), null); + }, () -> isTopicNsOwnedByBrokerAsync(topicName), null); }).exceptionally((exception) -> { log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception); @@ -1758,165 +1820,176 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { }); } - public CompletableFuture getManagedLedgerConfig(TopicName topicName) { + public CompletableFuture getManagedLedgerConfig(@Nonnull TopicName topicName) { + final CompletableFuture> topicPoliciesFuture = + getTopicPoliciesBypassSystemTopic(topicName); + return topicPoliciesFuture.thenCompose(optionalTopicPolicies -> + getManagedLedgerConfig(topicName, optionalTopicPolicies.orElse(null))); + } + + private CompletableFuture getManagedLedgerConfig(@Nonnull TopicName topicName, + @Nullable TopicPolicies topicPolicies) { + requireNonNull(topicName); NamespaceName namespace = topicName.getNamespaceObject(); ServiceConfiguration serviceConfig = pulsar.getConfiguration(); NamespaceResources nsr = pulsar.getPulsarResources().getNamespaceResources(); LocalPoliciesResources lpr = pulsar.getPulsarResources().getLocalPolicies(); - return nsr.getPoliciesAsync(namespace) - .thenCombine(lpr.getLocalPoliciesAsync(namespace), (policies, localPolicies) -> { - PersistencePolicies persistencePolicies = null; - RetentionPolicies retentionPolicies = null; - OffloadPoliciesImpl topicLevelOffloadPolicies = null; - - if (pulsar.getConfig().isTopicLevelPoliciesEnabled() - && !NamespaceService.isSystemServiceNamespace(namespace.toString())) { - final TopicPolicies topicPolicies = pulsar.getTopicPoliciesService() - .getTopicPoliciesIfExists(topicName); - if (topicPolicies != null) { - persistencePolicies = topicPolicies.getPersistence(); - retentionPolicies = topicPolicies.getRetentionPolicies(); - topicLevelOffloadPolicies = topicPolicies.getOffloadPolicies(); - } - } - - if (persistencePolicies == null) { - persistencePolicies = policies.map(p -> p.persistence).orElseGet( - () -> new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(), - serviceConfig.getManagedLedgerDefaultWriteQuorum(), - serviceConfig.getManagedLedgerDefaultAckQuorum(), - serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit())); - } + final CompletableFuture> nsPolicies = nsr.getPoliciesAsync(namespace); + final CompletableFuture> lcPolicies = lpr.getLocalPoliciesAsync(namespace); + return nsPolicies.thenCombine(lcPolicies, (policies, localPolicies) -> { + PersistencePolicies persistencePolicies = null; + RetentionPolicies retentionPolicies = null; + OffloadPoliciesImpl topicLevelOffloadPolicies = null; + if (topicPolicies != 
null) { + persistencePolicies = topicPolicies.getPersistence(); + retentionPolicies = topicPolicies.getRetentionPolicies(); + topicLevelOffloadPolicies = topicPolicies.getOffloadPolicies(); + } - if (retentionPolicies == null) { - retentionPolicies = policies.map(p -> p.retention_policies).orElseGet( - () -> new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(), - serviceConfig.getDefaultRetentionSizeInMB()) - ); - } + if (persistencePolicies == null) { + persistencePolicies = policies.map(p -> p.persistence).orElseGet( + () -> new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(), + serviceConfig.getManagedLedgerDefaultWriteQuorum(), + serviceConfig.getManagedLedgerDefaultAckQuorum(), + serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit())); + } - ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); - managedLedgerConfig.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble()); - managedLedgerConfig.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum()); - managedLedgerConfig.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum()); - - if (serviceConfig.isStrictBookieAffinityEnabled()) { - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( - IsolatedBookieEnsemblePlacementPolicy.class); - if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } else if (isSystemTopic(topicName)) { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, "*"); - properties.put(IsolatedBookieEnsemblePlacementPolicy - .SECONDARY_ISOLATION_BOOKIE_GROUPS, "*"); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } else { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } - } else { - if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( - IsolatedBookieEnsemblePlacementPolicy.class); - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } + if (retentionPolicies == null) { + if (SystemTopicNames.isSystemTopic(topicName)) { + if (log.isDebugEnabled()) { + log.debug("{} Disable data retention policy for system topic.", topicName); } + retentionPolicies = new RetentionPolicies(0, 0); + } else { + retentionPolicies = policies.map(p -> p.retention_policies).orElseGet( + () -> new 
RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(), + serviceConfig.getDefaultRetentionSizeInMB()) + ); + } + } - managedLedgerConfig.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate()); - managedLedgerConfig.setDigestType(serviceConfig.getManagedLedgerDigestType()); - managedLedgerConfig.setPassword(serviceConfig.getManagedLedgerPassword()); - - managedLedgerConfig - .setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist()); - managedLedgerConfig.setPersistentUnackedRangesWithMultipleEntriesEnabled( - serviceConfig.isPersistentUnackedRangesWithMultipleEntriesEnabled()); - managedLedgerConfig.setMaxUnackedRangesToPersistInMetadataStore( - serviceConfig.getManagedLedgerMaxUnackedRangesToPersistInMetadataStore()); - managedLedgerConfig.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger()); - managedLedgerConfig - .setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(), - TimeUnit.MINUTES); - managedLedgerConfig - .setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(), - TimeUnit.MINUTES); - managedLedgerConfig.setMaxSizePerLedgerMb(serviceConfig.getManagedLedgerMaxSizePerLedgerMbytes()); - - managedLedgerConfig.setMetadataOperationsTimeoutSeconds( - serviceConfig.getManagedLedgerMetadataOperationsTimeoutSeconds()); - managedLedgerConfig - .setReadEntryTimeoutSeconds(serviceConfig.getManagedLedgerReadEntryTimeoutSeconds()); - managedLedgerConfig - .setAddEntryTimeoutSeconds(serviceConfig.getManagedLedgerAddEntryTimeoutSeconds()); - managedLedgerConfig.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize()); - managedLedgerConfig.setUnackedRangesOpenCacheSetEnabled( - serviceConfig.isManagedLedgerUnackedRangesOpenCacheSetEnabled()); - managedLedgerConfig.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum()); - managedLedgerConfig.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum()); - managedLedgerConfig - .setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger()); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + managedLedgerConfig.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble()); + managedLedgerConfig.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum()); + managedLedgerConfig.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum()); + + if (serviceConfig.isStrictBookieAffinityEnabled()) { + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( + IsolatedBookieEnsemblePlacementPolicy.class); + if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } else if (isSystemTopic(topicName)) { + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, "*"); + properties.put(IsolatedBookieEnsemblePlacementPolicy + .SECONDARY_ISOLATION_BOOKIE_GROUPS, "*"); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } else { + Map 
properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } + } else { + if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( + IsolatedBookieEnsemblePlacementPolicy.class); + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } + } - managedLedgerConfig - .setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds()); - managedLedgerConfig - .setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES); - managedLedgerConfig.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB()); - managedLedgerConfig.setAutoSkipNonRecoverableData(serviceConfig.isAutoSkipNonRecoverableData()); - managedLedgerConfig.setLazyCursorRecovery(serviceConfig.isLazyCursorRecovery()); - managedLedgerConfig.setInactiveLedgerRollOverTime( - serviceConfig.getManagedLedgerInactiveLedgerRolloverTimeSeconds(), TimeUnit.SECONDS); - managedLedgerConfig.setCacheEvictionByMarkDeletedPosition( - serviceConfig.isCacheEvictionByMarkDeletedPosition()); - managedLedgerConfig.setMinimumBacklogCursorsForCaching( - serviceConfig.getManagedLedgerMinimumBacklogCursorsForCaching()); - managedLedgerConfig.setMinimumBacklogEntriesForCaching( - serviceConfig.getManagedLedgerMinimumBacklogEntriesForCaching()); - managedLedgerConfig.setMaxBacklogBetweenCursorsForCaching( - serviceConfig.getManagedLedgerMaxBacklogBetweenCursorsForCaching()); - - OffloadPoliciesImpl nsLevelOffloadPolicies = - (OffloadPoliciesImpl) policies.map(p -> p.offload_policies).orElse(null); - OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.mergeConfiguration( - topicLevelOffloadPolicies, - OffloadPoliciesImpl.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)), - getPulsar().getConfig().getProperties()); - if (NamespaceService.isSystemServiceNamespace(namespace.toString())) { - managedLedgerConfig.setLedgerOffloader(NullLedgerOffloader.INSTANCE); - } else { - if (topicLevelOffloadPolicies != null) { - try { - LedgerOffloader topicLevelLedgerOffLoader = - pulsar().createManagedLedgerOffloader(offloadPolicies); - managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); - } catch (PulsarServerException e) { - throw new RuntimeException(e); - } - } else { - //If the topic level policy is null, use the namespace level - managedLedgerConfig - .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); - } + managedLedgerConfig.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate()); + managedLedgerConfig.setDigestType(serviceConfig.getManagedLedgerDigestType()); + managedLedgerConfig.setPassword(serviceConfig.getManagedLedgerPassword()); + + managedLedgerConfig + .setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist()); + 
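Note (illustrative aside, not part of the patch): the refactored getManagedLedgerConfig above resolves each setting with topic-level policies taking precedence over namespace-level policies, falling back to broker defaults when neither is set. A minimal sketch of that precedence pattern, using hypothetical names:

import java.util.Optional;
import java.util.function.Supplier;

final class PolicyPrecedenceSketch {
    // Pick the topic-level value if present, else the namespace-level value,
    // else fall back to the broker default (mirrors how persistence/retention are resolved above).
    static <T> T resolve(Optional<T> topicLevel, Optional<T> namespaceLevel, Supplier<T> brokerDefault) {
        return topicLevel.orElseGet(() -> namespaceLevel.orElseGet(brokerDefault));
    }
}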
managedLedgerConfig.setPersistentUnackedRangesWithMultipleEntriesEnabled( + serviceConfig.isPersistentUnackedRangesWithMultipleEntriesEnabled()); + managedLedgerConfig.setMaxUnackedRangesToPersistInMetadataStore( + serviceConfig.getManagedLedgerMaxUnackedRangesToPersistInMetadataStore()); + managedLedgerConfig.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger()); + managedLedgerConfig + .setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(), + TimeUnit.MINUTES); + managedLedgerConfig + .setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(), + TimeUnit.MINUTES); + managedLedgerConfig.setMaxSizePerLedgerMb(serviceConfig.getManagedLedgerMaxSizePerLedgerMbytes()); + + managedLedgerConfig.setMetadataOperationsTimeoutSeconds( + serviceConfig.getManagedLedgerMetadataOperationsTimeoutSeconds()); + managedLedgerConfig + .setReadEntryTimeoutSeconds(serviceConfig.getManagedLedgerReadEntryTimeoutSeconds()); + managedLedgerConfig + .setAddEntryTimeoutSeconds(serviceConfig.getManagedLedgerAddEntryTimeoutSeconds()); + managedLedgerConfig.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize()); + managedLedgerConfig.setUnackedRangesOpenCacheSetEnabled( + serviceConfig.isManagedLedgerUnackedRangesOpenCacheSetEnabled()); + managedLedgerConfig.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum()); + managedLedgerConfig.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum()); + managedLedgerConfig + .setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger()); + + managedLedgerConfig + .setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds()); + managedLedgerConfig + .setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES); + managedLedgerConfig.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB()); + managedLedgerConfig.setAutoSkipNonRecoverableData(serviceConfig.isAutoSkipNonRecoverableData()); + managedLedgerConfig.setLazyCursorRecovery(serviceConfig.isLazyCursorRecovery()); + managedLedgerConfig.setInactiveLedgerRollOverTime( + serviceConfig.getManagedLedgerInactiveLedgerRolloverTimeSeconds(), TimeUnit.SECONDS); + managedLedgerConfig.setCacheEvictionByMarkDeletedPosition( + serviceConfig.isCacheEvictionByMarkDeletedPosition()); + managedLedgerConfig.setMinimumBacklogCursorsForCaching( + serviceConfig.getManagedLedgerMinimumBacklogCursorsForCaching()); + managedLedgerConfig.setMinimumBacklogEntriesForCaching( + serviceConfig.getManagedLedgerMinimumBacklogEntriesForCaching()); + managedLedgerConfig.setMaxBacklogBetweenCursorsForCaching( + serviceConfig.getManagedLedgerMaxBacklogBetweenCursorsForCaching()); + + OffloadPoliciesImpl nsLevelOffloadPolicies = + (OffloadPoliciesImpl) policies.map(p -> p.offload_policies).orElse(null); + OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.mergeConfiguration( + topicLevelOffloadPolicies, + OffloadPoliciesImpl.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)), + getPulsar().getConfig().getProperties()); + if (NamespaceService.isSystemServiceNamespace(namespace.toString())) { + managedLedgerConfig.setLedgerOffloader(NullLedgerOffloader.INSTANCE); + } else { + if (topicLevelOffloadPolicies != null) { + try { + LedgerOffloader topicLevelLedgerOffLoader = + pulsar().createManagedLedgerOffloader(offloadPolicies); + managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); + } catch 
(PulsarServerException e) { + throw new RuntimeException(e); } + } else { + //If the topic level policy is null, use the namespace level + managedLedgerConfig + .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); + } + } - managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled( - serviceConfig.isAcknowledgmentAtBatchIndexLevelEnabled()); - managedLedgerConfig.setNewEntriesCheckDelayInMillis( - serviceConfig.getManagedLedgerNewEntriesCheckDelayInMillis()); - return managedLedgerConfig; - }); + managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled( + serviceConfig.isAcknowledgmentAtBatchIndexLevelEnabled()); + managedLedgerConfig.setNewEntriesCheckDelayInMillis( + serviceConfig.getManagedLedgerNewEntriesCheckDelayInMillis()); + return managedLedgerConfig; + }); } private void addTopicToStatsMaps(TopicName topicName, Topic topic) { @@ -1944,7 +2017,7 @@ private void addTopicToStatsMaps(TopicName topicName, Topic topic) { } public void refreshTopicToStatsMaps(NamespaceBundle oldBundle) { - Objects.requireNonNull(oldBundle); + requireNonNull(oldBundle); try { // retrieve all topics under existing old bundle List topics = getAllTopicsFromNamespaceBundle(oldBundle.getNamespaceObject().toString(), @@ -2127,13 +2200,16 @@ public void monitorBacklogQuota() { }); } - public boolean isTopicNsOwnedByBroker(TopicName topicName) { - try { - return pulsar.getNamespaceService().isServiceUnitOwned(topicName); - } catch (Exception e) { - log.warn("Failed to check the ownership of the topic: {}, {}", topicName, e.getMessage()); - } - return false; + public CompletableFuture isTopicNsOwnedByBrokerAsync(TopicName topicName) { + return pulsar.getNamespaceService().isServiceUnitOwnedAsync(topicName) + .handle((hasOwnership, t) -> { + if (t == null) { + return hasOwnership; + } else { + log.warn("Failed to check the ownership of the topic: {}, {}", topicName, t.getMessage()); + return false; + } + }); } public CompletableFuture checkTopicNsOwnership(final String topic) { @@ -2144,9 +2220,9 @@ public CompletableFuture checkTopicNsOwnership(final String topic) { if (ownedByThisInstance) { return CompletableFuture.completedFuture(null); } else { - String msg = String.format("Namespace bundle for topic (%s) not served by this instance. " - + "Please redo the lookup. Request is denied: namespace=%s", topic, - topicName.getNamespace()); + String msg = String.format("Namespace bundle for topic (%s) not served by this instance:%s. " + + "Please redo the lookup. Request is denied: namespace=%s", + topic, pulsar.getBrokerId(), topicName.getNamespace()); log.warn(msg); return FutureUtil.failedFuture(new ServiceUnitNotReadyException(msg)); } @@ -2184,6 +2260,21 @@ private CompletableFuture unloadServiceUnit(NamespaceBundle serviceUnit if (serviceUnit.includes(topicName)) { // Topic needs to be unloaded log.info("[{}] Unloading topic", topicName); + if (topicFuture.isCompletedExceptionally()) { + try { + topicFuture.get(); + } catch (InterruptedException | ExecutionException ex) { + if (ex.getCause() instanceof ServiceUnitNotReadyException) { + // Topic was already unloaded + if (log.isDebugEnabled()) { + log.debug("[{}] Topic was already unloaded", topicName); + } + return; + } else { + log.warn("[{}] Got exception when closing topic", topicName, ex); + } + } + } closeFutures.add(topicFuture .thenCompose(t -> t.isPresent() ? 
t.get().close(closeWithoutWaitingClientDisconnect) : CompletableFuture.completedFuture(null))); @@ -2218,10 +2309,6 @@ public AuthorizationService getAuthorizationService() { return authorizationService; } - public CompletableFuture removeTopicFromCache(String topicName) { - return removeTopicFutureFromCache(topicName, null); - } - public CompletableFuture removeTopicFromCache(Topic topic) { Optional>> createTopicFuture = findTopicFutureInCache(topic); if (createTopicFuture.isEmpty()){ @@ -2723,7 +2810,7 @@ private void updateMaxPublishRatePerTopicInMessages() { forEachTopic(topic -> { if (topic instanceof AbstractTopic) { ((AbstractTopic) topic).updateBrokerPublishRate(); - ((AbstractTopic) topic).updatePublishDispatcher(); + ((AbstractTopic) topic).updatePublishRateLimiter(); } })); } @@ -3010,7 +3097,10 @@ private void createPendingLoadTopic() { CompletableFuture> pendingFuture = pendingTopic.getTopicFuture(); final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); final boolean acquiredPermit = topicLoadSemaphore.tryAcquire(); - checkOwnershipAndCreatePersistentTopic(topic, true, pendingFuture, pendingTopic.getProperties()); + checkOwnershipAndCreatePersistentTopic(topic, + true, + pendingFuture, + pendingTopic.getProperties(), pendingTopic.getTopicPolicies()); pendingFuture.handle((persistentTopic, ex) -> { // release permit and process next pending topic if (acquiredPermit) { @@ -3034,12 +3124,9 @@ public CompletableFuture fetchPartitionedTopicMetadata if (pulsar.getNamespaceService() == null) { return FutureUtil.failedFuture(new NamingException("namespace service is not ready")); } - Optional policies = - pulsar.getPulsarResources().getNamespaceResources() - .getPoliciesIfCached(topicName.getNamespaceObject()); - return pulsar.getNamespaceService().checkTopicExists(topicName) - .thenCompose(topicExists -> { - return fetchPartitionedTopicMetadataAsync(topicName) + return pulsar.getPulsarResources().getNamespaceResources().getPoliciesAsync(topicName.getNamespaceObject()) + .thenCompose(policies -> pulsar.getNamespaceService().checkTopicExists(topicName) + .thenCompose(topicExists -> fetchPartitionedTopicMetadataAsync(topicName) .thenCompose(metadata -> { CompletableFuture future = new CompletableFuture<>(); @@ -3052,7 +3139,7 @@ public CompletableFuture fetchPartitionedTopicMetadata && !topicExists && !topicName.isPartitioned() && pulsar.getBrokerService() - .isDefaultTopicTypePartitioned(topicName, policies)) { + .isDefaultTopicTypePartitioned(topicName, policies)) { isAllowAutoTopicCreationAsync(topicName, policies).thenAccept(allowed -> { if (allowed) { pulsar.getBrokerService() @@ -3061,9 +3148,12 @@ public CompletableFuture fetchPartitionedTopicMetadata .exceptionally(ex -> { if (ex.getCause() instanceof MetadataStoreException - .AlreadyExistsException) { + .AlreadyExistsException) { + log.info("[{}] The partitioned topic is already" + + " created, try to refresh the cache and read" + + " again.", topicName); // The partitioned topic might be created concurrently - fetchPartitionedTopicMetadataAsync(topicName) + fetchPartitionedTopicMetadataAsync(topicName, true) .whenComplete((metadata2, ex2) -> { if (ex2 == null) { future.complete(metadata2); @@ -3072,6 +3162,9 @@ public CompletableFuture fetchPartitionedTopicMetadata } }); } else { + log.error("[{}] operation of creating partitioned" + + " topic metadata failed", + topicName, ex); future.completeExceptionally(ex); } return null; @@ -3089,8 +3182,7 @@ public CompletableFuture fetchPartitionedTopicMetadata }); 
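Note (illustrative aside, not part of the patch): the fetchPartitionedTopicMetadata change above replaces the possibly-stale getPoliciesIfCached read with getPoliciesAsync and chains the dependent checks onto the returned future. A rough sketch of that composition style, with hypothetical interface names:

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

final class AsyncPolicyLookupSketch {
    interface Resources {
        CompletableFuture<Optional<String>> getPoliciesAsync(String namespace);
        CompletableFuture<Boolean> checkTopicExists(String topic);
    }

    // Fetch namespace policies asynchronously, then chain the existence check onto the same
    // future instead of consulting a local cache up front.
    static CompletableFuture<Boolean> shouldAutoCreate(Resources res, String namespace, String topic) {
        return res.getPoliciesAsync(namespace)
                .thenCompose(policies -> res.checkTopicExists(topic)
                        .thenApply(exists -> !exists && policies.isPresent()));
    }
}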
return future; - }); - }); + }))); } @SuppressWarnings("deprecation") @@ -3118,9 +3210,14 @@ private CompletableFuture createDefaultPartitionedTopi } public CompletableFuture fetchPartitionedTopicMetadataAsync(TopicName topicName) { + return fetchPartitionedTopicMetadataAsync(topicName, false); + } + + public CompletableFuture fetchPartitionedTopicMetadataAsync(TopicName topicName, + boolean refreshCacheAndGet) { // gets the number of partitions from the configuration cache return pulsar.getPulsarResources().getNamespaceResources().getPartitionedTopicResources() - .getPartitionedTopicMetadataAsync(topicName).thenApply(metadata -> { + .getPartitionedTopicMetadataAsync(topicName, refreshCacheAndGet).thenApply(metadata -> { // if the partitioned topic is not found in metadata, then the topic is not partitioned return metadata.orElseGet(() -> new PartitionedTopicMetadata()); }); @@ -3282,10 +3379,9 @@ public CompletableFuture isAllowAutoTopicCreationAsync(final String top } public CompletableFuture isAllowAutoTopicCreationAsync(final TopicName topicName) { - Optional policies = - pulsar.getPulsarResources().getNamespaceResources() - .getPoliciesIfCached(topicName.getNamespaceObject()); - return isAllowAutoTopicCreationAsync(topicName, policies); + return pulsar.getPulsarResources().getNamespaceResources() + .getPoliciesAsync(topicName.getNamespaceObject()) + .thenCompose(policies -> isAllowAutoTopicCreationAsync(topicName, policies)); } private CompletableFuture isAllowAutoTopicCreationAsync(final TopicName topicName, @@ -3295,10 +3391,19 @@ private CompletableFuture isAllowAutoTopicCreationAsync(final TopicName topicName.getNamespaceObject()); return CompletableFuture.completedFuture(false); } - //System topic can always be created automatically + + // ExtensibleLoadManagerImpl.internal topics expects to be non-partitioned-topics now. + // We don't allow the auto-creation here. + // ExtensibleLoadManagerImpl.start() is responsible to create the internal system topics. + if (ExtensibleLoadManagerImpl.isInternalTopic(topicName.toString())) { + return CompletableFuture.completedFuture(false); + } + + //Other system topics can be created automatically if (pulsar.getConfiguration().isSystemTopicEnabled() && isSystemTopic(topicName)) { return CompletableFuture.completedFuture(true); } + final boolean allowed; AutoTopicCreationOverride autoTopicCreationOverride = getAutoTopicCreationOverride(topicName, policies); if (autoTopicCreationOverride != null) { @@ -3346,11 +3451,23 @@ private AutoTopicCreationOverride getAutoTopicCreationOverride(final TopicName t return null; } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. we can use #{@link BrokerService#isAllowAutoSubscriptionCreationAsync(TopicName)} to instead of it. + */ + @Deprecated public boolean isAllowAutoSubscriptionCreation(final String topic) { TopicName topicName = TopicName.get(topic); return isAllowAutoSubscriptionCreation(topicName); } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. we can use #{@link BrokerService#isAllowAutoSubscriptionCreationAsync(TopicName)} to instead of it. 
+ */ + @Deprecated public boolean isAllowAutoSubscriptionCreation(final TopicName topicName) { AutoSubscriptionCreationOverride autoSubscriptionCreationOverride = getAutoSubscriptionCreationOverride(topicName); @@ -3361,6 +3478,12 @@ public boolean isAllowAutoSubscriptionCreation(final TopicName topicName) { } } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. we can use #{@link BrokerService#isAllowAutoSubscriptionCreationAsync(TopicName)} to instead of it. + */ + @Deprecated private AutoSubscriptionCreationOverride getAutoSubscriptionCreationOverride(final TopicName topicName) { Optional topicPolicies = getTopicPolicies(topicName); if (topicPolicies.isPresent() && topicPolicies.get().getAutoSubscriptionCreationOverride() != null) { @@ -3377,6 +3500,25 @@ private AutoSubscriptionCreationOverride getAutoSubscriptionCreationOverride(fin return null; } + public @Nonnull CompletionStage isAllowAutoSubscriptionCreationAsync(@Nonnull TopicName tpName) { + requireNonNull(tpName); + // topic level policies + final var topicPolicies = getTopicPolicies(tpName); + if (topicPolicies.isPresent() && topicPolicies.get().getAutoSubscriptionCreationOverride() != null) { + return CompletableFuture.completedFuture(topicPolicies.get().getAutoSubscriptionCreationOverride() + .isAllowAutoSubscriptionCreation()); + } + // namespace level policies + return pulsar.getPulsarResources().getNamespaceResources().getPoliciesAsync(tpName.getNamespaceObject()) + .thenApply(policies -> { + if (policies.isPresent() && policies.get().autoSubscriptionCreationOverride != null) { + return policies.get().autoSubscriptionCreationOverride.isAllowAutoSubscriptionCreation(); + } + // broker level policies + return pulsar.getConfiguration().isAllowAutoSubscriptionCreation(); + }); + } + public boolean isSystemTopic(String topic) { return isSystemTopic(TopicName.get(topic)); } @@ -3408,6 +3550,24 @@ public CompletableFuture deleteTopicPolicies(TopicName topicName) { .deleteTopicPoliciesAsync(TopicName.get(topicName.getPartitionedTopicName())); } + public CompletableFuture deleteSchema(TopicName topicName) { + // delete schema at the upper level when deleting the partitioned topic. + if (topicName.isPartitioned()) { + return CompletableFuture.completedFuture(null); + } + String base = topicName.getPartitionedTopicName(); + String id = TopicName.get(base).getSchemaName(); + return getPulsar().getSchemaRegistryService().deleteSchemaStorage(id).whenComplete((vid, ex) -> { + if (vid != null && ex == null) { + // It's different from `SchemasResource.deleteSchema` + // because when we delete a topic, the schema + // history is meaningless. But when we delete a schema of a topic, a new schema could be + // registered in the future. 
+ log.info("Deleted schema storage of id: {}", id); + } + }); + } + private CompletableFuture checkMaxTopicsPerNamespace(TopicName topicName, int numPartitions) { return pulsar.getPulsarResources().getNamespaceResources() .getPoliciesAsync(topicName.getNamespaceObject()) @@ -3427,7 +3587,7 @@ private CompletableFuture checkMaxTopicsPerNamespace(TopicName topicName, log.error("Failed to create persistent topic {}, " + "exceed maximum number of topics in namespace", topicName); return FutureUtil.failedFuture( - new RestException(Response.Status.PRECONDITION_FAILED, + new NotAllowedException( "Exceed maximum number of topics in namespace.")); } else { return CompletableFuture.completedFuture(null); @@ -3510,5 +3670,6 @@ private static class TopicLoadingContext { private final String topic; private final CompletableFuture> topicFuture; private final Map properties; + private final TopicPolicies topicPolicies; } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java index 3e77588b2459f..831d6068e2097 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java @@ -258,6 +258,8 @@ private static ServerError getClientErrorCode(Throwable t, boolean checkCauseIfU return ServerError.ServiceNotReady; } else if (t instanceof TopicNotFoundException) { return ServerError.TopicNotFound; + } else if (t instanceof SubscriptionNotFoundException) { + return ServerError.SubscriptionNotFound; } else if (t instanceof IncompatibleSchemaException || t instanceof InvalidSchemaDataException) { // for backward compatible with old clients, invalid schema data diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java index ea491bd40d332..b2b2b512c8cfc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java @@ -39,7 +39,8 @@ * number of keys assigned to each consumer. 
*/ public class ConsistentHashingStickyKeyConsumerSelector implements StickyKeyConsumerSelector { - + // use NUL character as field separator for hash key calculation + private static final String KEY_SEPARATOR = "\0"; private final ReadWriteLock rwLock = new ReentrantReadWriteLock(); // Consistent-Hash ring @@ -59,8 +60,7 @@ public CompletableFuture addConsumer(Consumer consumer) { // Insert multiple points on the hash ring for every consumer // The points are deterministically added based on the hash of the consumer name for (int i = 0; i < numberOfPoints; i++) { - String key = consumer.consumerName() + i; - int hash = Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + int hash = calculateHashForConsumerAndIndex(consumer, i); hashRing.compute(hash, (k, v) -> { if (v == null) { return Lists.newArrayList(consumer); @@ -79,14 +79,18 @@ public CompletableFuture addConsumer(Consumer consumer) { } } + private static int calculateHashForConsumerAndIndex(Consumer consumer, int index) { + String key = consumer.consumerName() + KEY_SEPARATOR + index; + return Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + } + @Override public void removeConsumer(Consumer consumer) { rwLock.writeLock().lock(); try { // Remove all the points that were added for this consumer for (int i = 0; i < numberOfPoints; i++) { - String key = consumer.consumerName() + i; - int hash = Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + int hash = calculateHashForConsumerAndIndex(consumer, i); hashRing.compute(hash, (k, v) -> { if (v == null) { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java index 1ee3f513ef288..d8ed99bb87436 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java @@ -43,6 +43,7 @@ import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.commons.lang3.tuple.MutablePair; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.MessageId; @@ -56,6 +57,7 @@ import org.apache.pulsar.common.api.proto.MessageIdData; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData.ClusterUrl; +import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.schema.SchemaType; @@ -113,6 +115,9 @@ public class Consumer { private final ConsumerStatsImpl stats; private final boolean isDurable; + + private final boolean isPersistentTopic; + private static final AtomicIntegerFieldUpdater UNACKED_MESSAGES_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Consumer.class, "unackedMessages"); private volatile int unackedMessages = 0; @@ -170,6 +175,7 @@ public Consumer(Subscription subscription, SubType subType, String topicName, lo this.readCompacted = readCompacted; this.consumerName = consumerName; this.isDurable = isDurable; + this.isPersistentTopic = subscription.getTopic() instanceof PersistentTopic; this.keySharedMeta = keySharedMeta; this.cnx = cnx; this.msgOut = new Rate(); @@ -241,6 +247,7 @@ public 
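Note (illustrative aside, not part of the patch): the NUL separator added in ConsistentHashingStickyKeyConsumerSelector above keeps the hash-ring keys for different (consumer, index) pairs distinct; without it, for example, ("consumer1", 11) and ("consumer11", 1) concatenate to the same string and hash to the same point. A small self-contained sketch:

final class HashKeySeparatorSketch {
    private static final String KEY_SEPARATOR = "\0";

    // Old style: name and index concatenated directly, so different pairs can collide.
    static String legacyKey(String consumerName, int index) {
        return consumerName + index;
    }

    // New style: a NUL character between name and index keeps the keys distinct.
    static String separatedKey(String consumerName, int index) {
        return consumerName + KEY_SEPARATOR + index;
    }

    public static void main(String[] args) {
        System.out.println(legacyKey("consumer1", 11).equals(legacyKey("consumer11", 1)));       // true (collision)
        System.out.println(separatedKey("consumer1", 11).equals(separatedKey("consumer11", 1))); // false
    }
}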
Consumer(Subscription subscription, SubType subType, String topicName, lo this.pendingAcks = null; this.stats = null; this.isDurable = false; + this.isPersistentTopic = false; this.metadata = null; this.keySharedMeta = null; this.clientAddress = null; @@ -320,7 +327,7 @@ public Future sendMessages(final List entries, EntryBatch if (pendingAcks != null) { int batchSize = batchSizes.getBatchSize(i); int stickyKeyHash = getStickyKeyHash(entry); - long[] ackSet = getCursorAckSet(PositionImpl.get(entry.getLedgerId(), entry.getEntryId())); + long[] ackSet = batchIndexesAcks == null ? null : batchIndexesAcks.getAckSet(i); if (ackSet != null) { unackedMessages -= (batchSize - BitSet.valueOf(ackSet).cardinality()); } @@ -363,6 +370,12 @@ public Future sendMessages(final List entries, EntryBatch msgOutCounter.add(totalMessages); bytesOutCounter.add(totalBytes); chunkedMessageRate.recordMultipleEvents(totalChunkedMessages, 0); + } else { + if (log.isDebugEnabled()) { + log.debug("[{}-{}] Sent messages to client fail by IO exception[{}], close the connection" + + " immediately. Consumer: {}", topicName, subscription, + status.cause() == null ? "" : status.cause().getMessage(), this.toString()); + } } }); return writeAndFlushPromise; @@ -503,17 +516,17 @@ private CompletableFuture individualAckNormal(CommandAck ack, Map checkPermissionsAsync() { TopicName topicName = TopicName.get(subscription.getTopicName()); if (cnx.getBrokerService().getAuthorizationService() != null) { - return cnx.getBrokerService().getAuthorizationService().canConsumeAsync(topicName, appId, - cnx.getAuthenticationData(), subscription.getName()) + AuthenticationDataSubscription authData = + new AuthenticationDataSubscription(cnx.getAuthenticationData(), subscription.getName()); + return cnx.getBrokerService().getAuthorizationService() + .allowTopicOperationAsync(topicName, TopicOperation.CONSUME, appId, authData) .handle((ok, e) -> { if (e != null) { log.warn("[{}] Get unexpected error while authorizing [{}] {}", appId, @@ -943,7 +959,7 @@ public int hashCode() { * * @param position */ - private void removePendingAcks(PositionImpl position) { + private boolean removePendingAcks(PositionImpl position) { Consumer ackOwnedConsumer = null; if (pendingAcks.get(position.getLedgerId(), position.getEntryId()) == null) { for (Consumer consumer : subscription.getConsumers()) { @@ -964,7 +980,7 @@ private void removePendingAcks(PositionImpl position) { if (ackedPosition != null) { if (!ackOwnedConsumer.getPendingAcks().remove(position.getLedgerId(), position.getEntryId())) { // Message was already removed by the other consumer - return; + return false; } if (log.isDebugEnabled()) { log.debug("[{}-{}] consumer {} received ack {}", topicName, subscription, consumerId, position); @@ -978,7 +994,9 @@ private void removePendingAcks(PositionImpl position) { ackOwnedConsumer.blockedConsumerOnUnackedMsgs = false; flowConsumerBlockedPermits(ackOwnedConsumer); } + return true; } + return false; } public ConcurrentLongLongPairHashMap getPendingAcks() { @@ -1064,7 +1082,7 @@ public Subscription getSubscription() { private int addAndGetUnAckedMsgs(Consumer consumer, int ackedMessages) { int unackedMsgs = 0; - if (Subscription.isIndividualAckMode(subType)) { + if (isPersistentTopic && Subscription.isIndividualAckMode(subType)) { subscription.addUnAckedMessages(ackedMessages); unackedMsgs = UNACKED_MESSAGES_UPDATER.addAndGet(consumer, ackedMessages); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java index 4a9b33a9afdb1..03d6f0750e02e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/EntryFilterSupport.java @@ -35,7 +35,8 @@ public class EntryFilterSupport { public EntryFilterSupport(Subscription subscription) { this.subscription = subscription; - if (subscription != null && subscription.getTopic() != null) { + if (subscription != null && subscription.getTopic() != null + && !subscription.getTopic().isSystemTopic()) { final BrokerService brokerService = subscription.getTopic().getBrokerService(); final boolean allowOverrideEntryFilters = brokerService .pulsar().getConfiguration().isAllowOverrideEntryFilters(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisePublishLimiter.java similarity index 93% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java rename to pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisePublishLimiter.java index 6e215b3b49515..ce14f6d7dd71e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisePublishLimiter.java @@ -24,7 +24,7 @@ import org.apache.pulsar.common.util.RateLimitFunction; import org.apache.pulsar.common.util.RateLimiter; -public class PrecisPublishLimiter implements PublishRateLimiter { +public class PrecisePublishLimiter implements PublishRateLimiter { protected volatile int publishMaxMessageRate = 0; protected volatile long publishMaxByteRate = 0; // precise mode for publish rate limiter @@ -33,18 +33,18 @@ public class PrecisPublishLimiter implements PublishRateLimiter { private final RateLimitFunction rateLimitFunction; private final ScheduledExecutorService scheduledExecutorService; - public PrecisPublishLimiter(Policies policies, String clusterName, RateLimitFunction rateLimitFunction) { + public PrecisePublishLimiter(Policies policies, String clusterName, RateLimitFunction rateLimitFunction) { this.rateLimitFunction = rateLimitFunction; update(policies, clusterName); this.scheduledExecutorService = null; } - public PrecisPublishLimiter(PublishRate publishRate, RateLimitFunction rateLimitFunction) { + public PrecisePublishLimiter(PublishRate publishRate, RateLimitFunction rateLimitFunction) { this(publishRate, rateLimitFunction, null); } - public PrecisPublishLimiter(PublishRate publishRate, RateLimitFunction rateLimitFunction, - ScheduledExecutorService scheduledExecutorService) { + public PrecisePublishLimiter(PublishRate publishRate, RateLimitFunction rateLimitFunction, + ScheduledExecutorService scheduledExecutorService) { this.rateLimitFunction = rateLimitFunction; update(publishRate); this.scheduledExecutorService = scheduledExecutorService; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java index 5b62e3261e64f..53b79f06e8e24 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java @@ -51,6 +51,7 @@ import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.naming.TopicName; import 
org.apache.pulsar.common.policies.data.ClusterData.ClusterUrl; +import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.policies.data.stats.NonPersistentPublisherStatsImpl; import org.apache.pulsar.common.policies.data.stats.PublisherStatsImpl; import org.apache.pulsar.common.protocol.Commands; @@ -781,7 +782,7 @@ public CompletableFuture checkPermissionsAsync() { TopicName topicName = TopicName.get(topic.getName()); if (cnx.getBrokerService().getAuthorizationService() != null) { return cnx.getBrokerService().getAuthorizationService() - .canProduceAsync(topicName, appId, cnx.getAuthenticationData()) + .allowTopicOperationAsync(topicName, TopicOperation.PRODUCE, appId, cnx.getAuthenticationData()) .handle((ok, ex) -> { if (ex != null) { log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, topic.getName(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarCommandSenderImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarCommandSenderImpl.java index dd74fc4e71ed2..105650caaaf13 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarCommandSenderImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarCommandSenderImpl.java @@ -356,12 +356,18 @@ public void sendEndTxnErrorResponse(long requestId, TxnID txnID, ServerError err writeAndFlush(outBuf); } + /*** + * @param topics topic names which are matching, the topic name contains the partition suffix. + */ @Override public void sendWatchTopicListSuccess(long requestId, long watcherId, String topicsHash, List topics) { BaseCommand command = Commands.newWatchTopicListSuccess(requestId, watcherId, topicsHash, topics); interceptAndWriteCommand(command); } + /*** + * {@inheritDoc} + */ @Override public void sendWatchTopicListUpdate(long watcherId, List newTopics, List deletedTopics, String topicsHash) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java index 01274ab4460cb..de96a317205bb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java @@ -55,6 +55,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; @@ -129,6 +130,7 @@ import org.apache.pulsar.common.api.proto.CommandUnsubscribe; import org.apache.pulsar.common.api.proto.CommandWatchTopicList; import org.apache.pulsar.common.api.proto.CommandWatchTopicListClose; +import org.apache.pulsar.common.api.proto.CompressionType; import org.apache.pulsar.common.api.proto.FeatureFlags; import org.apache.pulsar.common.api.proto.KeySharedMeta; import org.apache.pulsar.common.api.proto.KeySharedMode; @@ -141,6 +143,8 @@ import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.api.proto.SingleMessageMetadata; import org.apache.pulsar.common.api.proto.TxnAction; +import org.apache.pulsar.common.compression.CompressionCodec; +import org.apache.pulsar.common.compression.CompressionCodecProvider; import org.apache.pulsar.common.intercept.InterceptException; import org.apache.pulsar.common.naming.Metadata; import org.apache.pulsar.common.naming.NamespaceName; @@ -984,7 +988,6 @@ protected void 
handleConnect(CommandConnect connect) { try { byte[] authData = connect.hasAuthData() ? connect.getAuthData() : emptyArray; AuthData clientData = AuthData.of(authData); - // init authentication if (connect.hasAuthMethodName()) { authMethod = connect.getAuthMethodName(); @@ -1043,10 +1046,22 @@ protected void handleConnect(CommandConnect connect) { .getAuthenticationService() .getAuthenticationProvider(originalAuthMethod); + /** + * When both the broker and the proxy are configured with anonymousUserRole, + * and the client does not configure an authentication method, + * the proxy sets clientAuthRole to the anonymousUserRole value when it creates the connection, + * and clientAuthMethod will be none. + * In that case the broker should likewise set authRole to anonymousUserRole. + */ if (originalAuthenticationProvider == null) { - throw new AuthenticationException( - String.format("Can't find AuthenticationProvider for original role" - + " using auth method [%s] is not available", originalAuthMethod)); + authRole = getBrokerService().getAuthenticationService().getAnonymousUserRole() + .orElseThrow(() -> + new AuthenticationException("No anonymous role, and can't find " + + "AuthenticationProvider for original role using auth method " + + "[" + originalAuthMethod + "] is not available")); + originalPrincipal = authRole; + completeConnect(clientProtocolVersion, clientVersion); + return; } originalAuthDataCopy = AuthData.of(connect.getOriginalAuthData().getBytes()); @@ -1155,7 +1170,8 @@ protected void handleSubscribe(final CommandSubscribe subscribe) { remoteAddress, getPrincipal()); } - log.info("[{}] Subscribing on topic {} / {}", remoteAddress, topicName, subscriptionName); + log.info("[{}] Subscribing on topic {} / {}.
consumerId: {}", this.ctx().channel().toString(), + topicName, subscriptionName, consumerId); try { Metadata.validateMetadata(metadata, service.getPulsar().getConfiguration().getMaxConsumerMetadataSize()); @@ -1186,7 +1202,7 @@ protected void handleSubscribe(final CommandSubscribe subscribe) { "Consumer that failed is already present on the connection"); } else { Consumer consumer = existingConsumerFuture.getNow(null); - log.info("[{}] Consumer with the same id is already created:" + log.warn("[{}] Consumer with the same id is already created:" + " consumerId={}, consumer={}", remoteAddress, consumerId, consumer); commandSender.sendSuccessResponse(requestId); @@ -1204,39 +1220,43 @@ protected void handleSubscribe(final CommandSubscribe subscribe) { .failedFuture(new TopicNotFoundException( "Topic " + topicName + " does not exist")); } + final Topic topic = optTopic.get(); + return service.isAllowAutoSubscriptionCreationAsync(topicName) + .thenCompose(isAllowedAutoSubscriptionCreation -> { + boolean rejectSubscriptionIfDoesNotExist = isDurable + && !isAllowedAutoSubscriptionCreation + && !topic.getSubscriptions().containsKey(subscriptionName) + && topic.isPersistent(); + + if (rejectSubscriptionIfDoesNotExist) { + return FutureUtil + .failedFuture( + new SubscriptionNotFoundException( + "Subscription does not exist")); + } - Topic topic = optTopic.get(); - - boolean rejectSubscriptionIfDoesNotExist = isDurable - && !service.isAllowAutoSubscriptionCreation(topicName.toString()) - && !topic.getSubscriptions().containsKey(subscriptionName) - && topic.isPersistent(); - - if (rejectSubscriptionIfDoesNotExist) { - return FutureUtil - .failedFuture( - new SubscriptionNotFoundException( - "Subscription does not exist")); - } - - SubscriptionOption option = SubscriptionOption.builder().cnx(ServerCnx.this) - .subscriptionName(subscriptionName) - .consumerId(consumerId).subType(subType).priorityLevel(priorityLevel) - .consumerName(consumerName).isDurable(isDurable) - .startMessageId(startMessageId).metadata(metadata).readCompacted(readCompacted) - .initialPosition(initialPosition) - .startMessageRollbackDurationSec(startMessageRollbackDurationSec) - .replicatedSubscriptionStateArg(isReplicated).keySharedMeta(keySharedMeta) - .subscriptionProperties(subscriptionProperties) - .consumerEpoch(consumerEpoch) - .schemaType(schema == null ? null : schema.getType()) - .build(); - if (schema != null && schema.getType() != SchemaType.AUTO_CONSUME) { - return topic.addSchemaIfIdleOrCheckCompatible(schema) - .thenCompose(v -> topic.subscribe(option)); - } else { - return topic.subscribe(option); - } + SubscriptionOption option = SubscriptionOption.builder().cnx(ServerCnx.this) + .subscriptionName(subscriptionName) + .consumerId(consumerId).subType(subType) + .priorityLevel(priorityLevel) + .consumerName(consumerName).isDurable(isDurable) + .startMessageId(startMessageId).metadata(metadata) + .readCompacted(readCompacted) + .initialPosition(initialPosition) + .startMessageRollbackDurationSec(startMessageRollbackDurationSec) + .replicatedSubscriptionStateArg(isReplicated) + .keySharedMeta(keySharedMeta) + .subscriptionProperties(subscriptionProperties) + .consumerEpoch(consumerEpoch) + .schemaType(schema == null ? 
null : schema.getType()) + .build(); + if (schema != null && schema.getType() != SchemaType.AUTO_CONSUME) { + return topic.addSchemaIfIdleOrCheckCompatible(schema) + .thenCompose(v -> topic.subscribe(option)); + } else { + return topic.subscribe(option); + } + }); }) .thenAccept(consumer -> { if (consumerFuture.complete(consumer)) { @@ -1349,7 +1369,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { cmdProducer.hasInitialSubscriptionName() ? cmdProducer.getInitialSubscriptionName() : null; final boolean supportsPartialProducer = supportsPartialProducer(); - TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer); + final TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer); if (topicName == null) { return; } @@ -1380,36 +1400,36 @@ protected void handleProducer(final CommandProducer cmdProducer) { CompletableFuture existingProducerFuture = producers.putIfAbsent(producerId, producerFuture); if (existingProducerFuture != null) { - if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) { - Producer producer = existingProducerFuture.getNow(null); - log.info("[{}] Producer with the same id is already created:" - + " producerId={}, producer={}", remoteAddress, producerId, producer); - commandSender.sendProducerSuccessResponse(requestId, producer.getProducerName(), - producer.getSchemaVersion()); - return null; - } else { + if (!existingProducerFuture.isDone()) { // There was an early request to create a producer with same producerId. // This can happen when client timeout is lower than the broker timeouts. // We need to wait until the previous producer creation request // either complete or fails. - ServerError error = null; - if (!existingProducerFuture.isDone()) { - error = ServerError.ServiceNotReady; - } else { - error = getErrorCode(existingProducerFuture); - // remove producer with producerId as it's already completed with exception - producers.remove(producerId, existingProducerFuture); - } log.warn("[{}][{}] Producer with id is already present on the connection, producerId={}", remoteAddress, topicName, producerId); - commandSender.sendErrorResponse(requestId, error, "Producer is already present on the connection"); - return null; + commandSender.sendErrorResponse(requestId, ServerError.ServiceNotReady, + "Producer is already present on the connection"); + } else if (existingProducerFuture.isCompletedExceptionally()) { + // remove producer with producerId as it's already completed with exception + log.warn("[{}][{}] Producer with id is failed to register present on the connection, producerId={}", + remoteAddress, topicName, producerId); + ServerError error = getErrorCode(existingProducerFuture); + producers.remove(producerId, existingProducerFuture); + commandSender.sendErrorResponse(requestId, error, + "Producer is already failed to register present on the connection"); + } else { + Producer producer = existingProducerFuture.getNow(null); + log.info("[{}] [{}] Producer with the same id is already created:" + + " producerId={}, producer={}", remoteAddress, topicName, producerId, producer); + commandSender.sendProducerSuccessResponse(requestId, producer.getProducerName(), + producer.getSchemaVersion()); } + return null; } if (log.isDebugEnabled()) { - log.debug("[{}][{}] Creating producer. producerId={}, schema is {}", remoteAddress, topicName, - producerId, schema == null ? "absent" : "present"); + log.debug("[{}][{}] Creating producer. 
producerId={}, producerName={}, schema is {}", remoteAddress, + topicName, producerId, producerName, schema == null ? "absent" : "present"); } service.getOrCreateTopic(topicName.toString()).thenCompose((Topic topic) -> { @@ -1455,33 +1475,38 @@ protected void handleProducer(final CommandProducer cmdProducer) { schemaVersionFuture.thenAccept(schemaVersion -> { topic.checkIfTransactionBufferRecoverCompletely(isTxnEnabled).thenAccept(future -> { - CompletableFuture createInitSubFuture; + CompletionStage createInitSubFuture; if (!Strings.isNullOrEmpty(initialSubscriptionName) && topic.isPersistent() && !topic.getSubscriptions().containsKey(initialSubscriptionName)) { - if (!this.getBrokerService().isAllowAutoSubscriptionCreation(topicName)) { - String msg = - "Could not create the initial subscription due to the auto subscription " - + "creation is not allowed."; - if (producerFuture.completeExceptionally( - new BrokerServiceException.NotAllowedException(msg))) { - log.warn("[{}] {} initialSubscriptionName: {}, topic: {}", - remoteAddress, msg, initialSubscriptionName, topicName); - commandSender.sendErrorResponse(requestId, - ServerError.NotAllowedError, msg); - } - producers.remove(producerId, producerFuture); - return; - } - createInitSubFuture = - topic.createSubscription(initialSubscriptionName, InitialPosition.Earliest, - false, null); + createInitSubFuture = service.isAllowAutoSubscriptionCreationAsync(topicName) + .thenCompose(isAllowAutoSubscriptionCreation -> { + if (!isAllowAutoSubscriptionCreation) { + return CompletableFuture.failedFuture( + new BrokerServiceException.NotAllowedException( + "Could not create the initial subscription due to" + + " the auto subscription creation is not allowed.")); + } + return topic.createSubscription(initialSubscriptionName, + InitialPosition.Earliest, false, null); + }); } else { createInitSubFuture = CompletableFuture.completedFuture(null); } createInitSubFuture.whenComplete((sub, ex) -> { if (ex != null) { + final Throwable rc = FutureUtil.unwrapCompletionException(ex); + if (rc instanceof BrokerServiceException.NotAllowedException) { + log.warn("[{}] {} initialSubscriptionName: {}, topic: {}", + remoteAddress, rc.getMessage(), initialSubscriptionName, topicName); + if (producerFuture.completeExceptionally(rc)) { + commandSender.sendErrorResponse(requestId, + ServerError.NotAllowedError, rc.getMessage()); + } + producers.remove(producerId, producerFuture); + return; + } String msg = "Failed to create the initial subscription: " + ex.getCause().getMessage(); log.warn("[{}] {} initialSubscriptionName: {}, topic: {}", @@ -1537,7 +1562,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { // Do not print stack traces for expected exceptions if (cause instanceof NoSuchElementException) { - cause = new TopicNotFoundException("Topic Not Found."); + cause = new TopicNotFoundException(String.format("Topic not found %s", topicName.toString())); log.warn("[{}] Failed to load topic {}, producerId={}: Topic not found", remoteAddress, topicName, producerId); } else if (!Exceptions.areExceptionsPresentInChain(cause, @@ -1608,7 +1633,7 @@ private void buildProducerAndAddTopic(Topic topic, long producerId, String produ } producers.remove(producerId, producerFuture); - }).exceptionally(ex -> { + }).exceptionallyAsync(ex -> { if (ex.getCause() instanceof BrokerServiceException.TopicMigratedException) { Optional clusterURL = getMigratedClusterUrl(service.getPulsar()); if (clusterURL.isPresent()) { @@ -1649,7 +1674,7 @@ private void 
buildProducerAndAddTopic(Topic topic, long producerId, String produ BrokerServiceException.getClientErrorCode(ex), ex.getMessage()); } return null; - }); + }, ctx.executor()); producerQueuedFuture.thenRun(() -> { // If the producer is queued waiting, we will get an immediate notification @@ -1738,11 +1763,12 @@ private void printSendCommandDebug(CommandSend send, ByteBuf headersAndPayload) headersAndPayload.resetReaderIndex(); if (log.isDebugEnabled()) { log.debug("[{}] Received send message request. producer: {}:{} {}:{} size: {}," - + " partition key is: {}, ordering key is {}", + + " partition key is: {}, ordering key is {}, uncompressedSize is {}", remoteAddress, send.getProducerId(), send.getSequenceId(), msgMetadata.getProducerName(), msgMetadata.getSequenceId(), headersAndPayload.readableBytes(), msgMetadata.hasPartitionKey() ? msgMetadata.getPartitionKey() : null, - msgMetadata.hasOrderingKey() ? msgMetadata.getOrderingKey() : null); + msgMetadata.hasOrderingKey() ? msgMetadata.getOrderingKey() : null, + msgMetadata.getUncompressedSize()); } } @@ -1778,6 +1804,12 @@ protected void handleAck(CommandAck ack) { } return null; }); + } else { + if (log.isDebugEnabled()) { + log.debug("Consumer future is not complete(not complete or error), but received command ack. so discard" + + " this command. consumerId: {}, cnx: {}, messageIdCount: {}", ack.getConsumerId(), + this.ctx().channel().toString(), ack.getMessageIdsCount()); + } } } @@ -2167,6 +2199,14 @@ private int calculateTheLastBatchIndexInBatch(MessageMetadata metadata, ByteBuf if (batchSize <= 1){ return -1; } + if (metadata.hasCompression()) { + var tmp = payload; + CompressionType compressionType = metadata.getCompression(); + CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType); + int uncompressedSize = metadata.getUncompressedSize(); + payload = codec.decode(payload, uncompressedSize); + tmp.release(); + } SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata(); int lastBatchIndexInBatch = -1; for (int i = 0; i < batchSize; i++){ @@ -2326,7 +2366,7 @@ remoteAddress, new String(commandGetSchema.getSchemaVersion()), schemaService.getSchema(schemaName, schemaVersion).thenAccept(schemaAndMetadata -> { if (schemaAndMetadata == null) { commandSender.sendGetSchemaErrorResponse(requestId, ServerError.TopicNotFound, - "Topic not found or no-schema"); + String.format("Topic not found or no-schema %s", commandGetSchema.getTopic())); } else { commandSender.sendGetSchemaResponse(requestId, SchemaInfoUtil.newSchemaInfo(schemaName, schemaAndMetadata.schema), schemaAndMetadata.version); @@ -2344,7 +2384,7 @@ protected void handleGetOrCreateSchema(CommandGetOrCreateSchema commandGetOrCrea log.debug("Received CommandGetOrCreateSchema call from {}", remoteAddress); } long requestId = commandGetOrCreateSchema.getRequestId(); - String topicName = commandGetOrCreateSchema.getTopic(); + final String topicName = commandGetOrCreateSchema.getTopic(); SchemaData schemaData = getSchema(commandGetOrCreateSchema.getSchema()); SchemaData schema = schemaData.getType() == SchemaType.NONE ? 
null : schemaData; service.getTopicIfExists(topicName).thenAccept(topicOpt -> { @@ -2364,7 +2404,7 @@ protected void handleGetOrCreateSchema(CommandGetOrCreateSchema commandGetOrCrea }); } else { commandSender.sendGetOrCreateSchemaErrorResponse(requestId, ServerError.TopicNotFound, - "Topic not found"); + String.format("Topic not found %s", topicName)); } }).exceptionally(ex -> { ServerError errorCode = BrokerServiceException.getClientErrorCode(ex); @@ -2920,6 +2960,7 @@ protected void interceptCommand(BaseCommand command) throws InterceptException { @Override public void closeProducer(Producer producer) { + assert ctx.executor().inEventLoop(); // removes producer-connection from map and send close command to producer safelyRemoveProducer(producer); if (getRemoteEndpointProtocolVersion() >= v5.getValue()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java index 9b10055f36fac..71f78e21f938f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java @@ -18,18 +18,23 @@ */ package org.apache.pulsar.broker.service; +import static java.util.Objects.requireNonNull; +import com.github.benmanes.caffeine.cache.AsyncLoadingCache; +import com.github.benmanes.caffeine.cache.Caffeine; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import javax.annotation.Nonnull; +import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.namespace.NamespaceBundleOwnershipListener; @@ -40,10 +45,8 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.TopicMessageImpl; -import org.apache.pulsar.client.util.RetryUtil; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.EventType; import org.apache.pulsar.common.events.PulsarEvent; @@ -53,6 +56,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.util.FutureUtil; +import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,34 +81,52 @@ public class SystemTopicBasedTopicPoliciesService implements TopicPoliciesServic private final Map>> readerCaches = new ConcurrentHashMap<>(); - @VisibleForTesting - final Map policyCacheInitMap = new ConcurrentHashMap<>(); + + final Map> policyCacheInitMap = new ConcurrentHashMap<>(); @VisibleForTesting final Map>> listeners = new ConcurrentHashMap<>(); + private final AsyncLoadingCache> writerCaches; + public SystemTopicBasedTopicPoliciesService(PulsarService pulsarService) { 
this.pulsarService = pulsarService; this.clusterName = pulsarService.getConfiguration().getClusterName(); this.localCluster = Sets.newHashSet(clusterName); + this.writerCaches = Caffeine.newBuilder() + .expireAfterAccess(5, TimeUnit.MINUTES) + .removalListener((namespaceName, writer, cause) -> { + ((SystemTopicClient.Writer) writer).closeAsync().exceptionally(ex -> { + log.error("[{}] Close writer error.", namespaceName, ex); + return null; + }); + }) + .buildAsync((namespaceName, executor) -> { + SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory + .createTopicPoliciesSystemTopicClient(namespaceName); + return systemTopicClient.newWriterAsync(); + }); } @Override public CompletableFuture deleteTopicPoliciesAsync(TopicName topicName) { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return CompletableFuture.completedFuture(null); + } return sendTopicPolicyEvent(topicName, ActionType.DELETE, null); } @Override public CompletableFuture updateTopicPoliciesAsync(TopicName topicName, TopicPolicies policies) { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return CompletableFuture.failedFuture(new BrokerServiceException.NotAllowedException( + "Not allowed to update topic policy for the heartbeat topic")); + } return sendTopicPolicyEvent(topicName, ActionType.UPDATE, policies); } private CompletableFuture sendTopicPolicyEvent(TopicName topicName, ActionType actionType, TopicPolicies policies) { - if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { - return CompletableFuture.failedFuture( - new BrokerServiceException.NotAllowedException("Not allowed to send event to health check topic")); - } return pulsarService.getPulsarResources().getNamespaceResources() .getPoliciesAsync(topicName.getNamespaceObject()) .thenCompose(namespacePolicies -> { @@ -118,39 +140,32 @@ private CompletableFuture sendTopicPolicyEvent(TopicName topicName, Action } catch (PulsarServerException e) { return CompletableFuture.failedFuture(e); } - - SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory - .createTopicPoliciesSystemTopicClient(topicName.getNamespaceObject()); - - return systemTopicClient.newWriterAsync() - .thenCompose(writer -> { - PulsarEvent event = getPulsarEvent(topicName, actionType, policies); - CompletableFuture writeFuture = - ActionType.DELETE.equals(actionType) ? writer.deleteAsync(getEventKey(event), event) - : writer.writeAsync(getEventKey(event), event); - return writeFuture.handle((messageId, e) -> { - if (e != null) { - return CompletableFuture.failedFuture(e); + CompletableFuture result = new CompletableFuture<>(); + writerCaches.get(topicName.getNamespaceObject()) + .whenComplete((writer, cause) -> { + if (cause != null) { + writerCaches.synchronous().invalidate(topicName.getNamespaceObject()); + result.completeExceptionally(cause); } else { - if (messageId != null) { - return CompletableFuture.completedFuture(null); - } else { - return CompletableFuture.failedFuture( - new RuntimeException("Got message id is null.")); - } - } - }).thenRun(() -> - writer.closeAsync().whenComplete((v, cause) -> { - if (cause != null) { - log.error("[{}] Close writer error.", topicName, cause); + PulsarEvent event = getPulsarEvent(topicName, actionType, policies); + CompletableFuture writeFuture = ActionType.DELETE.equals(actionType) + ? 
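The writerCaches field introduced above relies on Caffeine's AsyncLoadingCache so that one writer per namespace is created lazily, shared by all callers, closed once it sits idle, and invalidated on failure so the next event recreates it. A standalone sketch of the same caching pattern, using a hypothetical Connection type in place of SystemTopicClient.Writer:

import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

class AsyncResourceCacheSketch {

    // Hypothetical cached resource; the real code caches a system-topic writer per namespace
    interface Connection {
        CompletableFuture<Void> closeAsync();

        static CompletableFuture<Connection> connectAsync(String key) {
            Connection connection = () -> CompletableFuture.completedFuture(null);
            return CompletableFuture.completedFuture(connection);
        }
    }

    private final AsyncLoadingCache<String, Connection> connections = Caffeine.newBuilder()
            .expireAfterAccess(5, TimeUnit.MINUTES)
            // Close the resource whenever its entry is evicted (idle expiry or explicit invalidate)
            .<String, Connection>removalListener((key, connection, cause) -> {
                if (connection != null) {
                    connection.closeAsync();
                }
            })
            // The loader runs once per key; concurrent callers share the same pending future
            .buildAsync((key, executor) -> Connection.connectAsync(key));

    CompletableFuture<Connection> get(String key) {
        return connections.get(key).whenComplete((connection, error) -> {
            if (error != null) {
                // Drop the failed entry so the next caller triggers a fresh connect
                connections.synchronous().invalidate(key);
            }
        });
    }
}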
writer.deleteAsync(getEventKey(event), event) + : writer.writeAsync(getEventKey(event), event); + writeFuture.whenComplete((messageId, e) -> { + if (e != null) { + result.completeExceptionally(e); + } else { + if (messageId != null) { + result.complete(null); } else { - if (log.isDebugEnabled()) { - log.debug("[{}] Close writer success.", topicName); - } + result.completeExceptionally( + new RuntimeException("Got message id is null.")); } - }) - ); + } + }); + } }); + return result; }); } @@ -180,7 +195,11 @@ private void notifyListener(Message msg) { TopicName topicName = TopicName.get(TopicName.get(msg.getKey()).getPartitionedTopicName()); if (listeners.get(topicName) != null) { for (TopicPolicyListener listener : listeners.get(topicName)) { - listener.onUpdate(null); + try { + listener.onUpdate(null); + } catch (Throwable error) { + log.error("[{}] call listener error.", topicName, error); + } } } return; @@ -195,7 +214,11 @@ private void notifyListener(Message msg) { if (listeners.get(topicName) != null) { TopicPolicies policies = event.getPolicies(); for (TopicPolicyListener listener : listeners.get(topicName)) { - listener.onUpdate(policies); + try { + listener.onUpdate(policies); + } catch (Throwable error) { + log.error("[{}] call listener error.", topicName, error); + } } } } @@ -208,16 +231,60 @@ public TopicPolicies getTopicPolicies(TopicName topicName) throws TopicPoliciesC @Override public TopicPolicies getTopicPolicies(TopicName topicName, boolean isGlobal) throws TopicPoliciesCacheNotInitException { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return null; + } if (!policyCacheInitMap.containsKey(topicName.getNamespaceObject())) { NamespaceName namespace = topicName.getNamespaceObject(); - prepareInitPoliciesCache(namespace, new CompletableFuture<>()); + prepareInitPoliciesCacheAsync(namespace); } - if (policyCacheInitMap.containsKey(topicName.getNamespaceObject()) - && !policyCacheInitMap.get(topicName.getNamespaceObject())) { - throw new TopicPoliciesCacheNotInitException(); + + MutablePair result = new MutablePair<>(); + policyCacheInitMap.compute(topicName.getNamespaceObject(), (k, initialized) -> { + if (initialized == null || !initialized.isDone()) { + result.setLeft(new TopicPoliciesCacheNotInitException()); + } else { + TopicPolicies topicPolicies = + isGlobal ? globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName())) + : policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + result.setRight(topicPolicies); + } + return initialized; + }); + + if (result.getLeft() != null) { + throw result.getLeft(); + } else { + return result.getRight(); } - return isGlobal ? globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName())) - : policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + } + + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName, + boolean isGlobal) { + requireNonNull(topicName); + final CompletableFuture preparedFuture = prepareInitPoliciesCacheAsync(topicName.getNamespaceObject()); + return preparedFuture.thenApply(__ -> { + final TopicPolicies candidatePolicies = isGlobal + ? 
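The getTopicPolicies rewrite above folds the "is the cache initialized?" check and the cache lookup into a single Map.compute call, so both are performed under the map's per-key lock and cannot interleave with another writer that goes through compute on the same key (such as the namespace cleanup path). A stripped-down sketch of that shape, with plain String keys standing in for NamespaceName/TopicName:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang3.tuple.MutablePair;

class AtomicCacheReadSketch {
    private final ConcurrentHashMap<String, CompletableFuture<Void>> initMap = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, String> valueCache = new ConcurrentHashMap<>();

    String readIfInitialized(String key) {
        MutablePair<IllegalStateException, String> result = new MutablePair<>();
        // compute() runs the lambda while holding the lock for this key, so other writers that
        // also mutate state inside initMap.compute(key, ...) cannot interleave with this read
        initMap.compute(key, (k, initialized) -> {
            if (initialized == null || !initialized.isDone()) {
                result.setLeft(new IllegalStateException("cache not initialized for " + key));
            } else {
                result.setRight(valueCache.get(key));
            }
            return initialized;   // keep the existing mapping (or its absence) unchanged
        });
        if (result.getLeft() != null) {
            throw result.getLeft();
        }
        return result.getRight();
    }
}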
globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName())) + : policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + return Optional.ofNullable(candidatePolicies); + }); + } + + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName) { + requireNonNull(topicName); + final CompletableFuture preparedFuture = prepareInitPoliciesCacheAsync(topicName.getNamespaceObject()); + return preparedFuture.thenApply(__ -> { + final TopicPolicies localPolicies = policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + if (localPolicies != null) { + return Optional.of(localPolicies); + } + return Optional.ofNullable(globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName()))); + }); } @Override @@ -243,55 +310,60 @@ public CompletableFuture getTopicPoliciesBypassCacheAsync(TopicNa @Override public CompletableFuture addOwnedNamespaceBundleAsync(NamespaceBundle namespaceBundle) { - CompletableFuture result = new CompletableFuture<>(); NamespaceName namespace = namespaceBundle.getNamespaceObject(); if (NamespaceService.isHeartbeatNamespace(namespace)) { - result.complete(null); - return result; + return CompletableFuture.completedFuture(null); } synchronized (this) { if (readerCaches.get(namespace) != null) { ownedBundlesCountPerNamespace.get(namespace).incrementAndGet(); - result.complete(null); + return CompletableFuture.completedFuture(null); } else { - prepareInitPoliciesCache(namespace, result); + return prepareInitPoliciesCacheAsync(namespace); } } - return result; } - private void prepareInitPoliciesCache(@Nonnull NamespaceName namespace, CompletableFuture result) { - if (policyCacheInitMap.putIfAbsent(namespace, false) == null) { - CompletableFuture> readerCompletableFuture = - createSystemTopicClientWithRetry(namespace); + private @Nonnull CompletableFuture prepareInitPoliciesCacheAsync(@Nonnull NamespaceName namespace) { + requireNonNull(namespace); + return policyCacheInitMap.computeIfAbsent(namespace, (k) -> { + final CompletableFuture> readerCompletableFuture = + createSystemTopicClient(namespace); readerCaches.put(namespace, readerCompletableFuture); ownedBundlesCountPerNamespace.putIfAbsent(namespace, new AtomicInteger(1)); - readerCompletableFuture.thenAccept(reader -> { - initPolicesCache(reader, result); - result.thenRun(() -> readMorePolicies(reader)); - }).exceptionally(ex -> { - log.error("[{}] Failed to create reader on __change_events topic", namespace, ex); - cleanCacheAndCloseReader(namespace, false); - result.completeExceptionally(ex); + final CompletableFuture initFuture = readerCompletableFuture + .thenCompose(reader -> { + final CompletableFuture stageFuture = new CompletableFuture<>(); + initPolicesCache(reader, stageFuture); + return stageFuture + // Read policies in background + .thenAccept(__ -> readMorePoliciesAsync(reader)); + }); + initFuture.exceptionally(ex -> { + try { + log.error("[{}] Failed to create reader on __change_events topic", namespace, ex); + cleanCacheAndCloseReader(namespace, false); + } catch (Throwable cleanupEx) { + // Adding this catch to avoid break callback chain + log.error("[{}] Failed to cleanup reader on __change_events topic", namespace, cleanupEx); + } return null; }); - } + // let caller know we've got an exception. 
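prepareInitPoliciesCacheAsync above caches the initialization future itself via computeIfAbsent, so only the first caller creates the __change_events reader and every later caller awaits the same in-flight future. The same idea in a self-contained form (startInitialization is a hypothetical stand-in for creating the reader and replaying existing events):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class InitOnceSketch {
    private final ConcurrentHashMap<String, CompletableFuture<Void>> initFutures = new ConcurrentHashMap<>();

    // Hypothetical expensive asynchronous initialization
    private CompletableFuture<Void> startInitialization(String namespace) {
        return CompletableFuture.runAsync(() -> { /* create reader, replay existing events */ });
    }

    CompletableFuture<Void> prepareAsync(String namespace) {
        // computeIfAbsent guarantees the initialization is started at most once per key;
        // later callers simply get the already-stored (possibly still pending) future
        return initFutures.computeIfAbsent(namespace, this::startInitialization);
    }

    void invalidate(String namespace) {
        // A failed or stale future is dropped outside the mapping function,
        // so the next prepareAsync() call can retry from scratch
        initFutures.remove(namespace);
    }
}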
+ return initFuture; + }); } - protected CompletableFuture> createSystemTopicClientWithRetry( + protected CompletableFuture> createSystemTopicClient( NamespaceName namespace) { - CompletableFuture> result = new CompletableFuture<>(); try { createSystemTopicFactoryIfNeeded(); - } catch (PulsarServerException e) { - result.completeExceptionally(e); - return result; + } catch (PulsarServerException ex) { + return FutureUtil.failedFuture(ex); } - SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory + final SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory .createTopicPoliciesSystemTopicClient(namespace); - Backoff backoff = new Backoff(1, TimeUnit.SECONDS, 3, TimeUnit.SECONDS, 10, TimeUnit.SECONDS); - RetryUtil.retryAsynchronously(systemTopicClient::newReaderAsync, backoff, pulsarService.getExecutor(), result); - return result; + return systemTopicClient.newReaderAsync(); } @Override @@ -303,7 +375,7 @@ public CompletableFuture removeOwnedNamespaceBundleAsync(NamespaceBundle n } AtomicInteger bundlesCount = ownedBundlesCountPerNamespace.get(namespace); if (bundlesCount == null || bundlesCount.decrementAndGet() <= 0) { - cleanCacheAndCloseReader(namespace, true); + cleanCacheAndCloseReader(namespace, true, true); } return CompletableFuture.completedFuture(null); } @@ -359,24 +431,36 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp if (log.isDebugEnabled()) { log.debug("[{}] Reach the end of the system topic.", reader.getSystemTopic().getTopicName()); } - policyCacheInitMap.computeIfPresent( - reader.getSystemTopic().getTopicName().getNamespaceObject(), (k, v) -> true); + // replay policy message policiesCache.forEach(((topicName, topicPolicies) -> { if (listeners.get(topicName) != null) { for (TopicPolicyListener listener : listeners.get(topicName)) { - listener.onUpdate(topicPolicies); + try { + listener.onUpdate(topicPolicies); + } catch (Throwable error) { + log.error("[{}] call listener error.", topicName, error); + } } } })); + future.complete(null); } }); } private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean cleanOwnedBundlesCount) { + cleanCacheAndCloseReader(namespace, cleanOwnedBundlesCount, false); + } + + private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean cleanOwnedBundlesCount, + boolean cleanWriterCache) { + if (cleanWriterCache) { + writerCaches.synchronous().invalidate(namespace); + } CompletableFuture> readerFuture = readerCaches.remove(namespace); - policiesCache.entrySet().removeIf(entry -> Objects.equals(entry.getKey().getNamespaceObject(), namespace)); + if (cleanOwnedBundlesCount) { ownedBundlesCountPerNamespace.remove(namespace); } @@ -387,10 +471,20 @@ private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean return null; }); } - policyCacheInitMap.remove(namespace); + + policyCacheInitMap.compute(namespace, (k, v) -> { + policiesCache.entrySet().removeIf(entry -> Objects.equals(entry.getKey().getNamespaceObject(), namespace)); + return null; + }); } - private void readMorePolicies(SystemTopicClient.Reader reader) { + /** + * This is an async method for the background reader to continue syncing new messages. + * + * Note: You should not do any blocking call here. because it will affect + * #{@link SystemTopicBasedTopicPoliciesService#getTopicPoliciesAsync(TopicName)} method to block loading topic. 
+ */ + private void readMorePoliciesAsync(SystemTopicClient.Reader reader) { reader.readNextAsync() .thenAccept(msg -> { refreshTopicPoliciesCache(msg); @@ -398,7 +492,7 @@ private void readMorePolicies(SystemTopicClient.Reader reader) { }) .whenComplete((__, ex) -> { if (ex == null) { - readMorePolicies(reader); + readMorePoliciesAsync(reader); } else { Throwable cause = FutureUtil.unwrapCompletionException(ex); if (cause instanceof PulsarClientException.AlreadyClosedException) { @@ -407,7 +501,7 @@ private void readMorePolicies(SystemTopicClient.Reader reader) { reader.getSystemTopic().getTopicName().getNamespaceObject(), false); } else { log.warn("Read more topic polices exception, read again.", ex); - readMorePolicies(reader); + readMorePoliciesAsync(reader); } } }); @@ -575,7 +669,7 @@ boolean checkReaderIsCached(NamespaceName namespaceName) { } @VisibleForTesting - public Boolean getPoliciesCacheInit(NamespaceName namespaceName) { + public CompletableFuture getPoliciesCacheInit(NamespaceName namespaceName) { return policyCacheInitMap.get(namespaceName); } @@ -613,5 +707,10 @@ protected Map>> getListeners( return listeners; } + @VisibleForTesting + protected AsyncLoadingCache> getWriterCaches() { + return writerCaches; + } + private static final Logger log = LoggerFactory.getLogger(SystemTopicBasedTopicPoliciesService.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java index 7aa50057d73c9..aea5b9fc65b46 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java @@ -31,6 +31,7 @@ import org.apache.pulsar.common.api.proto.CommandWatchTopicListClose; import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.topics.TopicList; import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.metadata.api.NotificationType; @@ -42,11 +43,16 @@ public class TopicListService { public static class TopicListWatcher implements BiConsumer { + /** Topic names which are matching, the topic name contains the partition suffix. **/ private final List matchingTopics; private final TopicListService topicListService; private final long id; + /** The regexp for the topic name(not contains partition suffix). **/ private final Pattern topicsPattern; + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public TopicListWatcher(TopicListService topicListService, long id, Pattern topicsPattern, List topics) { this.topicListService = topicListService; @@ -59,9 +65,12 @@ public List getMatchingTopics() { return matchingTopics; } + /*** + * @param topicName topic name which contains partition suffix. + */ @Override public void accept(String topicName, NotificationType notificationType) { - if (topicsPattern.matcher(topicName).matches()) { + if (topicsPattern.matcher(TopicName.get(topicName).getPartitionedTopicName()).matches()) { List newTopics; List deletedTopics; if (notificationType == NotificationType.Deleted) { @@ -109,6 +118,9 @@ public void inactivate() { } } + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). 
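readMorePoliciesAsync above is a non-blocking read loop: it issues one readNextAsync, applies the message, and only re-arms itself from the completion callback, so no thread is ever parked waiting for the next event and at most one read is outstanding per reader. A generic sketch of the loop with a hypothetical Reader interface:

import java.util.concurrent.CompletableFuture;

class AsyncReadLoopSketch {

    // Hypothetical async reader (stands in for SystemTopicClient.Reader)
    interface Reader {
        CompletableFuture<String> readNextAsync();
        boolean isClosed();
    }

    void readMore(Reader reader) {
        reader.readNextAsync()
                .thenAccept(this::handle)
                .whenComplete((ignore, error) -> {
                    if (error == null) {
                        // Re-arm only after the previous message was handled
                        readMore(reader);
                    } else if (!reader.isClosed()) {
                        // Transient failure: keep reading; a closed reader ends the loop
                        readMore(reader);
                    }
                });
    }

    private void handle(String message) {
        System.out.println("applied policy event: " + message);
    }
}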
+ */ public void handleWatchTopicList(NamespaceName namespaceName, long watcherId, long requestId, Pattern topicsPattern, String topicsHash, Semaphore lookupSemaphore) { @@ -184,7 +196,9 @@ public void handleWatchTopicList(NamespaceName namespaceName, long watcherId, lo }); } - + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public void initializeTopicsListWatcher(CompletableFuture watcherFuture, NamespaceName namespace, long watcherId, Pattern topicsPattern) { namespaceService.getListOfPersistentTopics(namespace). @@ -246,6 +260,10 @@ public void deleteTopicListWatcher(Long watcherId) { log.info("[{}] Closed watcher, watcherId={}", connection.getRemoteAddress(), watcherId); } + /** + * @param deletedTopics topic names deleted(contains the partition suffix). + * @param newTopics topics names added(contains the partition suffix). + */ public void sendTopicListUpdate(long watcherId, String topicsHash, List deletedTopics, List newTopics) { connection.getCommandSender().sendWatchTopicListUpdate(watcherId, newTopics, deletedTopics, topicsHash); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java index c4bcc0c39353c..c09bab0a4b62c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java @@ -22,6 +22,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; import org.apache.pulsar.broker.service.BrokerServiceException.TopicPoliciesCacheNotInitException; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.BackoffBuilder; @@ -109,6 +110,32 @@ default CompletableFuture> getTopicPoliciesAsyncWithRetr return response; } + /** + * Asynchronously retrieves topic policies. + * This triggers the Pulsar broker's internal client to load policies from the + * system topic `persistent://tenant/namespace/__change_event`. + * + * @param topicName The name of the topic. + * @param isGlobal Indicates if the policies are global. + * @return A CompletableFuture containing an Optional of TopicPolicies. + * @throws NullPointerException If the topicName is null. + */ + @Nonnull + CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName, boolean isGlobal); + + /** + * Asynchronously retrieves topic policies. + * This triggers the Pulsar broker's internal client to load policies from the + * system topic `persistent://tenant/namespace/__change_event`. + * + * NOTE: If local policies are not available, it will fallback to using topic global policies. + * @param topicName The name of the topic. + * @return A CompletableFuture containing an Optional of TopicPolicies. + * @throws NullPointerException If the topicName is null. + */ + @Nonnull + CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName); + /** * Get policies for a topic without cache async. 
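The TopicListWatcher change above matches the watch regexp against the base topic name rather than the raw notification, because notifications arrive with the "-partition-N" suffix while watch patterns are written for the unpartitioned name. A tiny illustration, approximating TopicName#getPartitionedTopicName() with a regex strip (the real code uses the TopicName API):

import java.util.regex.Pattern;

class PartitionAwareMatchSketch {

    // Rough approximation of TopicName#getPartitionedTopicName(): drop a trailing "-partition-N"
    static String baseName(String topic) {
        return topic.replaceFirst("-partition-\\d+$", "");
    }

    public static void main(String[] args) {
        Pattern topicsPattern = Pattern.compile("persistent://public/default/orders-[a-z]+");
        String notification = "persistent://public/default/orders-eu-partition-3";

        // The raw notification does not match because of the partition suffix...
        System.out.println(topicsPattern.matcher(notification).matches());            // false
        // ...but the base name does, which is what the watcher should react to
        System.out.println(topicsPattern.matcher(baseName(notification)).matches());  // true
    }
}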
* @param topicName topic name @@ -162,6 +189,19 @@ public TopicPolicies getTopicPolicies(TopicName topicName, boolean isGlobal) return null; } + @Nonnull + @Override + public CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName, + boolean isGlobal) { + return CompletableFuture.completedFuture(Optional.empty()); + } + + @Nonnull + @Override + public CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName) { + return CompletableFuture.completedFuture(Optional.empty()); + } + @Override public TopicPolicies getTopicPoliciesIfExists(TopicName topicName) { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java index c106b1603f6bd..bfb7785a4f5c4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java @@ -184,7 +184,7 @@ public RedeliveryTracker getRedeliveryTracker() { } @Override - public void sendMessages(List entries) { + public synchronized void sendMessages(List entries) { Consumer consumer = TOTAL_AVAILABLE_PERMITS_UPDATER.get(this) > 0 ? getNextConsumer() : null; if (consumer != null) { SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java index 514db4219db98..087c5f932008f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java @@ -50,7 +50,7 @@ public class NonPersistentReplicator extends AbstractReplicator implements Repli public NonPersistentReplicator(NonPersistentTopic topic, String localCluster, String remoteCluster, BrokerService brokerService, PulsarClientImpl replicationClient) throws PulsarServerException { - super(localCluster, topic.getName(), remoteCluster, topic.getName(), topic.getReplicatorPrefix(), brokerService, + super(localCluster, topic, remoteCluster, topic.getName(), topic.getReplicatorPrefix(), brokerService, replicationClient); producerBuilder.blockIfQueueFull(false); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java index 317b8df6b9a82..4efaee2a1faa7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java @@ -26,7 +26,7 @@ import io.netty.buffer.ByteBuf; import io.netty.util.concurrent.FastThreadLocal; import java.util.ArrayList; -import java.util.Collections; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -168,17 +168,19 @@ public CompletableFuture initialize() { return brokerService.pulsar().getPulsarResources().getNamespaceResources() .getPoliciesAsync(TopicName.get(topic).getNamespaceObject()) .thenCompose(optPolicies -> { + final Policies 
policies; if (!optPolicies.isPresent()) { log.warn("[{}] Policies not present and isEncryptionRequired will be set to false", topic); isEncryptionRequired = false; + policies = new Policies(); } else { - Policies policies = optPolicies.get(); + policies = optPolicies.get(); updateTopicPolicyByNamespacePolicy(policies); isEncryptionRequired = policies.encryption_required; isAllowAutoUpdateSchema = policies.is_allow_auto_update_schema; } - updatePublishDispatcher(); - updateResourceGroupLimiter(optPolicies); + updatePublishRateLimiter(); + updateResourceGroupLimiter(policies); return updateClusterMigrated(); }); } @@ -199,7 +201,8 @@ public void publishMessage(ByteBuf data, PublishContext callback) { // entry internally retains data so, duplicateBuffer should be release here duplicateBuffer.release(); if (subscription.getDispatcher() != null) { - subscription.getDispatcher().sendMessages(Collections.singletonList(entry)); + // Dispatcher needs to call the set method to support entry filter feature. + subscription.getDispatcher().sendMessages(Arrays.asList(entry)); } else { // it happens when subscription is created but dispatcher is not created as consumer is not added // yet @@ -634,6 +637,7 @@ CompletableFuture removeReplicator(String remoteCluster) { replicators.get(remoteCluster).disconnect().thenRun(() -> { log.info("[{}] Successfully removed replicator {}", name, remoteCluster); + replicators.remove(remoteCluster); }).exceptionally(e -> { log.error("[{}] Failed to close replication producer {} {}", topic, name, e.getMessage(), e); @@ -920,7 +924,7 @@ public CompletableFuture asyncGetStats(boolean getP }); stats.topicEpoch = topicEpoch.orElse(null); - stats.ownerBroker = brokerService.pulsar().getLookupServiceAddress(); + stats.ownerBroker = brokerService.pulsar().getBrokerId(); future.complete(stats); return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java index ec34aeffbec4c..7921c25b071fb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java @@ -22,12 +22,15 @@ import static org.apache.pulsar.broker.service.AbstractBaseDispatcher.checkAndApplyReachedEndOfTopicOrTopicMigration; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; import org.apache.pulsar.common.api.proto.CommandAck.AckType; import org.apache.pulsar.compaction.CompactedTopic; +import org.apache.pulsar.compaction.CompactedTopicContext; +import org.apache.pulsar.compaction.CompactedTopicImpl; import org.apache.pulsar.compaction.Compactor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,5 +109,19 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { } } + CompletableFuture cleanCompactedLedger() { + final CompletableFuture compactedTopicContextFuture = + ((CompactedTopicImpl) compactedTopic).getCompactedTopicContextFuture(); + if (compactedTopicContextFuture != null) { + return compactedTopicContextFuture.thenCompose(context -> { + long compactedLedgerId = 
context.getLedger().getId(); + ((CompactedTopicImpl) compactedTopic).reset(); + return compactedTopic.deleteCompactedLedger(compactedLedgerId); + }); + } else { + return CompletableFuture.completedFuture(null); + } + } + private static final Logger log = LoggerFactory.getLogger(CompactorSubscription.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/DispatchRateLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/DispatchRateLimiter.java index b1e4803548414..800a82cc58cc4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/DispatchRateLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/DispatchRateLimiter.java @@ -204,6 +204,12 @@ public static CompletableFuture> getPoliciesAsync(BrokerServi return brokerService.pulsar().getPulsarResources().getNamespaceResources().getPoliciesAsync(namespace); } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. we can use #{@link DispatchRateLimiter#getPoliciesAsync(BrokerService, String)} to instead of it. + */ + @Deprecated public static Optional getPolicies(BrokerService brokerService, String topicName) { final NamespaceName namespace = TopicName.get(topicName).getNamespaceObject(); return brokerService.pulsar().getPulsarResources().getNamespaceResources().getPoliciesIfCached(namespace); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java index 08882982297ab..082dfed10c664 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java @@ -123,18 +123,6 @@ protected boolean replicateEntries(List entries) { continue; } - if (msg.isExpired(messageTTLInSeconds)) { - msgExpired.recordEvent(0 /* no value stat */); - if (log.isDebugEnabled()) { - log.debug("[{}] Discarding expired message at position {}, replicateTo {}", - replicatorId, entry.getPosition(), msg.getReplicateTo()); - } - cursor.asyncDelete(entry.getPosition(), this, entry.getPosition()); - entry.release(); - msg.recycle(); - continue; - } - if (STATE_UPDATER.get(this) != State.Started || isLocalMessageSkippedOnce) { // The producer is not ready yet after having stopped/restarted. 
Drop the message because it will // recovered when the producer is ready @@ -149,9 +137,6 @@ protected boolean replicateEntries(List entries) { } dispatchRateLimiter.ifPresent(rateLimiter -> rateLimiter.tryDispatchPermit(1, entry.getLength())); - - msgOut.recordEvent(headersAndPayload.readableBytes()); - msg.setReplicatedFrom(localCluster); headersAndPayload.retain(); @@ -181,6 +166,7 @@ protected boolean replicateEntries(List entries) { msg.setSchemaInfoForReplicator(schemaFuture.get()); msg.getMessageBuilder().clearTxnidMostBits(); msg.getMessageBuilder().clearTxnidLeastBits(); + msgOut.recordEvent(headersAndPayload.readableBytes()); // Increment pending messages for messages produced locally PENDING_MESSAGES_UPDATER.incrementAndGet(this); producer.sendAsync(msg, ProducerSendCallback.create(this, entry, msg)); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java index 030d74a014f29..802dd91796127 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java @@ -20,13 +20,14 @@ import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCursorCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.OpenCursorCallback; @@ -55,6 +56,8 @@ public class MessageDeduplication { private final ManagedLedger managedLedger; private ManagedCursor managedCursor; + private static final String IS_LAST_CHUNK = "isLastChunk"; + enum Status { // Deduplication is initialized @@ -126,10 +129,13 @@ public MessageDupUnknownException() { private final int maxNumberOfProducers; // Map used to track the inactive producer along with the timestamp of their last activity - private final Map inactiveProducers = new HashMap<>(); + private final Map inactiveProducers = new ConcurrentHashMap<>(); private final String replicatorPrefix; + + private final AtomicBoolean snapshotTaking = new AtomicBoolean(false); + public MessageDeduplication(PulsarService pulsar, PersistentTopic topic, ManagedLedger managedLedger) { this.pulsar = pulsar; this.topic = topic; @@ -324,11 +330,12 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade String producerName = publishContext.getProducerName(); long sequenceId = publishContext.getSequenceId(); long highestSequenceId = Math.max(publishContext.getHighestSequenceId(), sequenceId); + MessageMetadata md = null; if (producerName.startsWith(replicatorPrefix)) { // Message is coming from replication, we need to use the original producer name and sequence id // for the purpose of deduplication and not rely on the "replicator" name. 
int readerIndex = headersAndPayload.readerIndex(); - MessageMetadata md = Commands.parseMessageMetadata(headersAndPayload); + md = Commands.parseMessageMetadata(headersAndPayload); producerName = md.getProducerName(); sequenceId = md.getSequenceId(); highestSequenceId = Math.max(md.getHighestSequenceId(), sequenceId); @@ -337,7 +344,23 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade publishContext.setOriginalHighestSequenceId(highestSequenceId); headersAndPayload.readerIndex(readerIndex); } - + long chunkID = -1; + long totalChunk = -1; + if (publishContext.isChunked()) { + if (md == null) { + int readerIndex = headersAndPayload.readerIndex(); + md = Commands.parseMessageMetadata(headersAndPayload); + headersAndPayload.readerIndex(readerIndex); + } + chunkID = md.getChunkId(); + totalChunk = md.getNumChunksFromMsg(); + } + // All chunks of a message use the same message metadata and sequence ID, + // so we only need to check the sequence ID for the last chunk in a chunk message. + if (chunkID != -1 && chunkID != totalChunk - 1) { + publishContext.setProperty(IS_LAST_CHUNK, Boolean.FALSE); + return MessageDupStatus.NotDup; + } // Synchronize the get() and subsequent put() on the map. This would only be relevant if the producer // disconnects and re-connects very quickly. At that point the call can be coming from a different thread synchronized (highestSequencedPushed) { @@ -363,6 +386,11 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade } highestSequencedPushed.put(producerName, highestSequenceId); } + // Only put sequence ID into highestSequencedPushed and + // highestSequencedPersisted until receive and persistent the last chunk. + if (chunkID != -1 && chunkID == totalChunk - 1) { + publishContext.setProperty(IS_LAST_CHUNK, Boolean.TRUE); + } return MessageDupStatus.NotDup; } @@ -383,8 +411,10 @@ public void recordMessagePersisted(PublishContext publishContext, PositionImpl p sequenceId = publishContext.getOriginalSequenceId(); highestSequenceId = publishContext.getOriginalHighestSequenceId(); } - - highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId)); + Boolean isLastChunk = (Boolean) publishContext.getProperty(IS_LAST_CHUNK); + if (isLastChunk == null || isLastChunk) { + highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId)); + } if (++snapshotCounter >= snapshotInterval) { snapshotCounter = 0; takeSnapshot(position); @@ -406,6 +436,11 @@ private void takeSnapshot(Position position) { if (log.isDebugEnabled()) { log.debug("[{}] Taking snapshot of sequence ids map", topic.getName()); } + + if (!snapshotTaking.compareAndSet(false, true)) { + return; + } + Map snapshot = new TreeMap<>(); highestSequencedPersisted.forEach((producerName, sequenceId) -> { if (snapshot.size() < maxNumberOfProducers) { @@ -420,11 +455,13 @@ public void markDeleteComplete(Object ctx) { log.debug("[{}] Stored new deduplication snapshot at {}", topic.getName(), position); } lastSnapshotTimestamp = System.currentTimeMillis(); + snapshotTaking.set(false); } @Override public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { log.warn("[{}] Failed to store new deduplication snapshot at {}", topic.getName(), position); + snapshotTaking.set(false); } }, null); } @@ -436,7 +473,11 @@ private boolean isDeduplicationEnabled() { /** * Topic will call this method whenever a producer connects. 
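The deduplication change above treats all chunks of one large message as a single logical unit: every chunk carries the same sequence ID, so only the last chunk (chunkId == numChunksFromMsg - 1) is checked against and recorded in the highest-sequence tracking. A compact sketch of that decision, with a hypothetical ChunkMeta record in place of MessageMetadata:

class ChunkDedupSketch {

    // Hypothetical subset of the message metadata relevant to chunking
    record ChunkMeta(long sequenceId, int chunkId, int numChunksFromMsg, boolean chunked) { }

    private long highestPersistedSequenceId = -1;

    /** Returns true if the chunk should be persisted, false if it is a duplicate. */
    boolean shouldPersist(ChunkMeta meta) {
        boolean isLastChunk = !meta.chunked() || meta.chunkId() == meta.numChunksFromMsg() - 1;
        if (!isLastChunk) {
            // Intermediate chunks share the message's sequence ID; deciding duplication here
            // would wrongly drop every chunk after the first one
            return true;
        }
        if (meta.sequenceId() <= highestPersistedSequenceId) {
            return false;   // the whole chunked message was already persisted
        }
        highestPersistedSequenceId = meta.sequenceId();
        return true;
    }

    public static void main(String[] args) {
        ChunkDedupSketch dedup = new ChunkDedupSketch();
        System.out.println(dedup.shouldPersist(new ChunkMeta(7, 0, 3, true)));   // true (not the last chunk)
        System.out.println(dedup.shouldPersist(new ChunkMeta(7, 2, 3, true)));   // true (last chunk, new id)
        System.out.println(dedup.shouldPersist(new ChunkMeta(7, 2, 3, true)));   // false (duplicate)
    }
}

This sketch collapses the pushed and persisted maps into one field and updates it immediately on the last chunk, whereas the actual change only marks IS_LAST_CHUNK at this point and defers the persisted-map update to recordMessagePersisted.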
*/ - public synchronized void producerAdded(String producerName) { + public void producerAdded(String producerName) { + if (!isEnabled()) { + return; + } + // Producer is no-longer inactive inactiveProducers.remove(producerName); } @@ -444,7 +485,11 @@ public synchronized void producerAdded(String producerName) { /** * Topic will call this method whenever a producer disconnects. */ - public synchronized void producerRemoved(String producerName) { + public void producerRemoved(String producerName) { + if (!isEnabled()) { + return; + } + // Producer is no-longer active inactiveProducers.put(producerName, System.currentTimeMillis()); } @@ -456,7 +501,15 @@ public synchronized void purgeInactiveProducers() { long minimumActiveTimestamp = System.currentTimeMillis() - TimeUnit.MINUTES .toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes()); - Iterator> mapIterator = inactiveProducers.entrySet().iterator(); + // if not enabled just clear all inactive producer record. + if (!isEnabled()) { + if (!inactiveProducers.isEmpty()) { + inactiveProducers.clear(); + } + return; + } + + Iterator> mapIterator = inactiveProducers.entrySet().iterator(); boolean hasInactive = false; while (mapIterator.hasNext()) { java.util.Map.Entry entry = mapIterator.next(); @@ -482,6 +535,10 @@ public long getLastPublishedSequenceId(String producerName) { } public void takeSnapshot() { + if (!isEnabled()) { + return; + } + Integer interval = topic.getHierarchyTopicPolicies().getDeduplicationSnapshotIntervalSeconds().get(); long currentTimeStamp = System.currentTimeMillis(); if (interval == null || interval <= 0 @@ -504,5 +561,10 @@ ManagedCursor getManagedCursor() { return managedCursor; } + @VisibleForTesting + Map getInactiveProducers() { + return inactiveProducers; + } + private static final Logger log = LoggerFactory.getLogger(MessageDeduplication.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java index 4ac755860fc7b..b3d48252efe58 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java @@ -46,9 +46,9 @@ import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.Pair; -import org.apache.pulsar.broker.delayed.AbstractDelayedDeliveryTracker; import org.apache.pulsar.broker.delayed.BucketDelayedDeliveryTrackerFactory; import org.apache.pulsar.broker.delayed.DelayedDeliveryTracker; +import org.apache.pulsar.broker.delayed.DelayedDeliveryTrackerFactory; import org.apache.pulsar.broker.delayed.InMemoryDelayedDeliveryTracker; import org.apache.pulsar.broker.delayed.bucket.BucketDelayedDeliveryTracker; import org.apache.pulsar.broker.service.AbstractDispatcherMultipleConsumers; @@ -1070,14 +1070,15 @@ public boolean trackDelayedDelivery(long ledgerId, long entryId, MessageMetadata } protected synchronized NavigableSet getMessagesToReplayNow(int maxMessagesToRead) { - if (!redeliveryMessages.isEmpty()) { - return redeliveryMessages.getMessagesToReplayNow(maxMessagesToRead); - } else if (delayedDeliveryTracker.isPresent() && delayedDeliveryTracker.get().hasMessageAvailable()) { + if (delayedDeliveryTracker.isPresent() && 
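The snapshot path above also gains an AtomicBoolean guard so that at most one deduplication snapshot is in flight: the flag is claimed with compareAndSet before writing and released in both the success and the failure callback. The guard in isolation (persistSnapshotAsync is a hypothetical stand-in for the cursor mark-delete):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

class SingleFlightSnapshotSketch {
    private final AtomicBoolean snapshotTaking = new AtomicBoolean(false);

    // Hypothetical async persistence step
    private CompletableFuture<Void> persistSnapshotAsync() {
        return CompletableFuture.runAsync(() -> { /* store the sequence-id map */ });
    }

    void takeSnapshot() {
        // Only the caller that flips false -> true proceeds; concurrent calls are simply dropped
        if (!snapshotTaking.compareAndSet(false, true)) {
            return;
        }
        persistSnapshotAsync().whenComplete((ignore, error) -> {
            // Release the guard on success and on failure, otherwise snapshots would stop forever
            snapshotTaking.set(false);
            if (error != null) {
                System.err.println("snapshot failed: " + error.getMessage());
            }
        });
    }
}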
delayedDeliveryTracker.get().hasMessageAvailable()) { delayedDeliveryTracker.get().resetTickTime(topic.getDelayedDeliveryTickTimeMillis()); NavigableSet messagesAvailableNow = delayedDeliveryTracker.get().getScheduledMessages(maxMessagesToRead); messagesAvailableNow.forEach(p -> redeliveryMessages.add(p.getLedgerId(), p.getEntryId())); - return messagesAvailableNow; + } + + if (!redeliveryMessages.isEmpty()) { + return redeliveryMessages.getMessagesToReplayNow(maxMessagesToRead); } else { return Collections.emptyNavigableSet(); } @@ -1088,7 +1089,7 @@ protected synchronized boolean shouldPauseDeliveryForDelayTracker() { } @Override - public synchronized long getNumberOfDelayedMessages() { + public long getNumberOfDelayedMessages() { return delayedDeliveryTracker.map(DelayedDeliveryTracker::getNumberOfDelayedMessages).orElse(0L); } @@ -1098,19 +1099,15 @@ public CompletableFuture clearDelayedMessages() { return CompletableFuture.completedFuture(null); } - if (delayedDeliveryTracker.isEmpty() && topic.getBrokerService() - .getDelayedDeliveryTrackerFactory() instanceof BucketDelayedDeliveryTrackerFactory) { - synchronized (this) { - if (delayedDeliveryTracker.isEmpty()) { - delayedDeliveryTracker = Optional - .of(topic.getBrokerService().getDelayedDeliveryTrackerFactory().newTracker(this)); - } - } - } - if (delayedDeliveryTracker.isPresent()) { return this.delayedDeliveryTracker.get().clear(); } else { + DelayedDeliveryTrackerFactory delayedDeliveryTrackerFactory = + topic.getBrokerService().getDelayedDeliveryTrackerFactory(); + if (delayedDeliveryTrackerFactory instanceof BucketDelayedDeliveryTrackerFactory + bucketDelayedDeliveryTrackerFactory) { + return bucketDelayedDeliveryTrackerFactory.cleanResidualSnapshots(cursor); + } return CompletableFuture.completedFuture(null); } } @@ -1172,15 +1169,7 @@ public PersistentTopic getTopic() { public long getDelayedTrackerMemoryUsage() { - if (delayedDeliveryTracker.isEmpty()) { - return 0; - } - - if (delayedDeliveryTracker.get() instanceof AbstractDelayedDeliveryTracker) { - return delayedDeliveryTracker.get().getBufferMemoryUsage(); - } - - return 0; + return delayedDeliveryTracker.map(DelayedDeliveryTracker::getBufferMemoryUsage).orElse(0L); } public Map getBucketDelayedIndexStats() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java index 7cbf7bd2c787a..eacc568f0a48f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java @@ -347,8 +347,8 @@ protected void readMoreEntries(Consumer consumer) { } havePendingRead = true; if (consumer.readCompacted()) { - topic.getCompactedTopic().asyncReadEntriesOrWait(cursor, messagesToRead, isFirstRead, - this, consumer); + topic.getCompactedTopic().asyncReadEntriesOrWait(cursor, messagesToRead, bytesToRead, + topic.getMaxReadPosition(), isFirstRead, this, consumer); } else { ReadEntriesCtx readEntriesCtx = ReadEntriesCtx.create(consumer, consumer.getConsumerEpoch()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java index 
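getMessagesToReplayNow above now drains due delayed messages first: anything the delayed-delivery tracker reports as ready is folded into the redelivery set, and the replay batch is then taken from that single set, so delayed and redelivered entries go through one ordered collection and one size limit. A schematic version with plain long entry IDs instead of ledger/entry positions:

import java.util.NavigableSet;
import java.util.TreeSet;
import java.util.stream.Collectors;

class ReplaySelectionSketch {
    private final NavigableSet<Long> redeliveryMessages = new TreeSet<>();
    private final NavigableSet<Long> delayedMessagesDueNow = new TreeSet<>();   // hypothetical tracker output

    NavigableSet<Long> getMessagesToReplayNow(int maxMessagesToRead) {
        if (!delayedMessagesDueNow.isEmpty()) {
            // Scheduled messages that became due are folded into the redelivery set first
            redeliveryMessages.addAll(delayedMessagesDueNow);
            delayedMessagesDueNow.clear();
        }
        if (redeliveryMessages.isEmpty()) {
            return new TreeSet<>();
        }
        // Take at most maxMessagesToRead entries, preserving the set's ordering
        return redeliveryMessages.stream()
                .limit(maxMessagesToRead)
                .collect(Collectors.toCollection(TreeSet::new));
    }
}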
bddcb1b334df1..41bc6098e1a2f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service.persistent; +import com.google.common.annotations.VisibleForTesting; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; @@ -46,7 +47,6 @@ public class PersistentMessageExpiryMonitor implements FindEntryCallback { private final String topicName; private final Rate msgExpired; private final LongAdder totalMsgExpired; - private final boolean autoSkipNonRecoverableData; private final PersistentSubscription subscription; private static final int FALSE = 0; @@ -65,8 +65,12 @@ public PersistentMessageExpiryMonitor(String topicName, String subscriptionName, this.subscription = subscription; this.msgExpired = new Rate(); this.totalMsgExpired = new LongAdder(); + } + + @VisibleForTesting + public boolean isAutoSkipNonRecoverableData() { // check to avoid test failures - this.autoSkipNonRecoverableData = this.cursor.getManagedLedger() != null + return this.cursor.getManagedLedger() != null && this.cursor.getManagedLedger().getConfig().isAutoSkipNonRecoverableData(); } @@ -171,7 +175,8 @@ public void findEntryComplete(Position position, Object ctx) { if (position != null) { log.info("[{}][{}] Expiring all messages until position {}", topicName, subName, position); Position prevMarkDeletePos = cursor.getMarkDeletedPosition(); - cursor.asyncMarkDelete(position, markDeleteCallback, cursor.getNumberOfEntriesInBacklog(false)); + cursor.asyncMarkDelete(position, cursor.getProperties(), markDeleteCallback, + cursor.getNumberOfEntriesInBacklog(false)); if (!Objects.equals(cursor.getMarkDeletedPosition(), prevMarkDeletePos) && subscription != null) { subscription.updateLastMarkDeleteAdvancedTimestamp(); } @@ -189,7 +194,7 @@ public void findEntryFailed(ManagedLedgerException exception, Optional if (log.isDebugEnabled()) { log.debug("[{}][{}] Finding expired entry operation failed", topicName, subName, exception); } - if (autoSkipNonRecoverableData && failedReadPosition.isPresent() + if (isAutoSkipNonRecoverableData() && failedReadPosition.isPresent() && (exception instanceof NonRecoverableLedgerException)) { log.warn("[{}][{}] read failed from ledger at position:{} : {}", topicName, subName, failedReadPosition, exception.getMessage()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageFinder.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageFinder.java index d2e6f6f5ff869..08273155e4cfa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageFinder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageFinder.java @@ -71,7 +71,7 @@ public void findMessages(final long timestamp, AsyncCallbacks.FindEntryCallback entry.release(); } return false; - }, this, callback); + }, this, callback, true); } else { if (log.isDebugEnabled()) { log.debug("[{}][{}] Ignore message position find scheduled task, last find is still running", topicName, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java 
index a556237f4342c..0c3c9d63ad11b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java @@ -112,7 +112,7 @@ public PersistentReplicator(String localCluster, PersistentTopic localTopic, Man String remoteCluster, String remoteTopic, BrokerService brokerService, PulsarClientImpl replicationClient) throws PulsarServerException { - super(localCluster, localTopic.getName(), remoteCluster, remoteTopic, localTopic.getReplicatorPrefix(), + super(localCluster, localTopic, remoteCluster, remoteTopic, localTopic.getReplicatorPrefix(), brokerService, replicationClient); this.topic = localTopic; this.cursor = cursor; @@ -529,6 +529,12 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { public void readEntryComplete(Entry entry, Object ctx) { future.complete(entry); } + + @Override + public String toString() { + return String.format("Replication [%s] peek Nth message", + PersistentReplicator.this.producer.getProducerName()); + } }, null); return future; @@ -545,6 +551,13 @@ public void deleteComplete(Object ctx) { public void deleteFailed(ManagedLedgerException exception, Object ctx) { log.error("[{}] Failed to delete message at {}: {}", replicatorId, ctx, exception.getMessage(), exception); + if (exception instanceof CursorAlreadyClosedException) { + log.error("[{}] Asynchronous ack failure because replicator is already deleted and cursor is already" + + " closed {}, ({})", replicatorId, ctx, exception.getMessage(), exception); + // replicator is already deleted and cursor is already closed so, producer should also be stopped + closeProducerAsync(); + return; + } if (ctx instanceof PositionImpl) { PositionImpl deletedEntry = (PositionImpl) ctx; if (deletedEntry.compareTo((PositionImpl) cursor.getMarkDeletedPosition()) > 0) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java index 1a8c6e180a2a2..8f05530f58bfa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java @@ -71,17 +71,12 @@ public class PersistentStickyKeyDispatcherMultipleConsumers extends PersistentDi */ private final LinkedHashMap recentlyJoinedConsumers; - private final Set stuckConsumers; - private final Set nextStuckConsumers; - PersistentStickyKeyDispatcherMultipleConsumers(PersistentTopic topic, ManagedCursor cursor, Subscription subscription, ServiceConfiguration conf, KeySharedMeta ksm) { super(topic, cursor, subscription, ksm.isAllowOutOfOrderDelivery()); this.allowOutOfOrderDelivery = ksm.isAllowOutOfOrderDelivery(); this.recentlyJoinedConsumers = allowOutOfOrderDelivery ? 
null : new LinkedHashMap<>(); - this.stuckConsumers = new HashSet<>(); - this.nextStuckConsumers = new HashSet<>(); this.keySharedMode = ksm.getKeySharedMode(); switch (this.keySharedMode) { case AUTO_SPLIT: @@ -226,8 +221,6 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis } } - nextStuckConsumers.clear(); - final Map> groupedEntries = localGroupedEntries.get(); groupedEntries.clear(); final Map> consumerStickyKeyHashesMap = new HashMap<>(); @@ -318,14 +311,11 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis // acquire message-dispatch permits for already delivered messages acquirePermitsForDeliveredMessages(topic, cursor, totalEntries, totalMessagesSent, totalBytesSent); - stuckConsumers.clear(); - if (totalMessagesSent == 0 && (recentlyJoinedConsumers == null || recentlyJoinedConsumers.isEmpty())) { // This means, that all the messages we've just read cannot be dispatched right now. // This condition can only happen when: // 1. We have consumers ready to accept messages (otherwise the would not haven been triggered) // 2. All keys in the current set of messages are routing to consumers that are currently busy - // and stuck is not caused by stuckConsumers // // The solution here is to move on and read next batch of messages which might hopefully contain // also keys meant for other consumers. @@ -334,10 +324,7 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis // ahead in the stream while the new consumers are not ready to accept the new messages, // therefore would be most likely only increase the distance between read-position and mark-delete // position. - if (!nextStuckConsumers.isEmpty()) { - isDispatcherStuckOnReplays = true; - stuckConsumers.addAll(nextStuckConsumers); - } + isDispatcherStuckOnReplays = true; return true; } else if (currentThreadKeyNumber == 0) { return true; @@ -348,8 +335,6 @@ protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis private int getRestrictedMaxEntriesForConsumer(Consumer consumer, List entries, int maxMessages, ReadType readType, Set stickyKeyHashes) { if (maxMessages == 0) { - // the consumer was stuck - nextStuckConsumers.add(consumer); return 0; } if (readType == ReadType.Normal && stickyKeyHashes != null @@ -366,13 +351,6 @@ private int getRestrictedMaxEntriesForConsumer(Consumer consumer, List en // At this point, all the old messages were already consumed and this consumer // is now ready to receive any message if (maxReadPosition == null) { - // stop to dispatch by stuckConsumers - if (stuckConsumers.contains(consumer)) { - if (log.isDebugEnabled()) { - log.debug("[{}] stop to dispatch by stuckConsumers, consumer: {}", name, consumer); - } - return 0; - } // The consumer has not recently joined, so we can send all messages return maxMessages; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStreamingDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStreamingDispatcherSingleActiveConsumer.java index efe9de778a3e7..52bcc474c4bc2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStreamingDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStreamingDispatcherSingleActiveConsumer.java @@ -217,8 +217,8 @@ protected void readMoreEntries(Consumer consumer) { havePendingRead = true; if 
(consumer.readCompacted()) { - topic.getCompactedTopic().asyncReadEntriesOrWait(cursor, messagesToRead, isFirstRead, - this, consumer); + topic.getCompactedTopic().asyncReadEntriesOrWait(cursor, messagesToRead, bytesToRead, + PositionImpl.LATEST, isFirstRead, this, consumer); } else { streamingEntryReader.asyncReadEntries(messagesToRead, bytesToRead, consumer); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java index d283cac77c7b6..42db3dad175c1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java @@ -29,6 +29,7 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; @@ -202,7 +203,12 @@ public boolean setReplicated(boolean replicated) { if (this.cursor != null) { if (replicated) { - return this.cursor.putProperty(REPLICATED_SUBSCRIPTION_PROPERTY, 1L); + if (!config.isEnableReplicatedSubscriptions()) { + log.warn("[{}][{}] Failed set replicated subscription status to {}, please enable the " + + "configuration enableReplicatedSubscriptions", topicName, subName, replicated); + } else { + return this.cursor.putProperty(REPLICATED_SUBSCRIPTION_PROPERTY, 1L); + } } else { return this.cursor.removeProperty(REPLICATED_SUBSCRIPTION_PROPERTY); } @@ -520,9 +526,15 @@ public String getTypeString() { return "Null"; } - @Override public CompletableFuture analyzeBacklog(Optional position) { - + final ManagedLedger managedLedger = topic.getManagedLedger(); + final String newNonDurableCursorName = "analyze-backlog-" + UUID.randomUUID(); + ManagedCursor newNonDurableCursor; + try { + newNonDurableCursor = ((ManagedCursorImpl) cursor).duplicateNonDurableCursor(newNonDurableCursorName); + } catch (ManagedLedgerException e) { + return CompletableFuture.failedFuture(e); + } long start = System.currentTimeMillis(); if (log.isDebugEnabled()) { log.debug("[{}][{}] Starting to analyze backlog", topicName, subName); @@ -537,7 +549,7 @@ public CompletableFuture analyzeBacklog(Optional AtomicLong rejectedMessages = new AtomicLong(); AtomicLong rescheduledMessages = new AtomicLong(); - Position currentPosition = cursor.getMarkDeletedPosition(); + Position currentPosition = newNonDurableCursor.getMarkDeletedPosition(); if (log.isDebugEnabled()) { log.debug("[{}][{}] currentPosition {}", @@ -597,7 +609,7 @@ public CompletableFuture analyzeBacklog(Optional return true; }; - return cursor.scan( + CompletableFuture res = newNonDurableCursor.scan( position, condition, batchSize, @@ -624,7 +636,22 @@ public CompletableFuture analyzeBacklog(Optional topicName, subName, end - start, result); return result; }); + res.whenComplete((__, ex) -> { + managedLedger.asyncDeleteCursor(newNonDurableCursorName, + new AsyncCallbacks.DeleteCursorCallback(){ + @Override + public void deleteCursorComplete(Object ctx) { + // Nothing to do. 
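To make the cursor lifecycle in the analyzeBacklog change above easier to follow: the scan now runs against a short-lived non-durable copy of the subscription cursor, so it can never move the real mark-delete position, and the copy is dropped once the scan settles. Below is a minimal sketch of that lifecycle using only JDK types; the Cursor and Ledger interfaces are illustrative stand-ins, not Pulsar's ManagedCursor/ManagedLedger API.

import java.util.UUID;
import java.util.concurrent.CompletableFuture;

final class BacklogAnalyzerSketch {

    // Illustrative stand-ins for the managed-ledger abstractions used by the patch.
    interface Cursor {
        CompletableFuture<Long> scan();
    }

    interface Ledger {
        Cursor duplicateNonDurableCursor(String name) throws Exception;
        CompletableFuture<Void> deleteCursor(String name);
    }

    static CompletableFuture<Long> analyzeBacklog(Ledger ledger) {
        // 1. A uniquely named, non-durable copy: the real cursor is never advanced.
        String name = "analyze-backlog-" + UUID.randomUUID();
        Cursor copy;
        try {
            copy = ledger.duplicateNonDurableCursor(name);
        } catch (Exception e) {
            return CompletableFuture.failedFuture(e);
        }
        // 2. Scan from the copy.
        CompletableFuture<Long> result = copy.scan();
        // 3. Delete the temporary cursor whether the scan succeeded or failed;
        //    the deletion outcome does not affect the result returned to the caller.
        result.whenComplete((entries, ex) -> ledger.deleteCursor(name));
        return result;
    }
}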
+ } + @Override + public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) { + log.warn("[{}][{}] Delete non-durable cursor[{}] failed when analyze backlog.", + topicName, subName, newNonDurableCursor.getName()); + } + }, null); + }); + return res; } @Override @@ -852,6 +879,12 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { public void readEntryComplete(Entry entry, Object ctx) { future.complete(entry); } + + @Override + public String toString() { + return String.format("Subscription [%s-%s] async replay entries", PersistentSubscription.this.topicName, + PersistentSubscription.this.subName); + } }, null); return future; @@ -1062,8 +1095,9 @@ public List getConsumers() { @Override public boolean expireMessages(int messageTTLInSeconds) { - if ((getNumberOfEntriesInBacklog(false) == 0) || (dispatcher != null && dispatcher.isConsumerConnected() - && getNumberOfEntriesInBacklog(false) < MINIMUM_BACKLOG_FOR_EXPIRY_CHECK + long backlog = getNumberOfEntriesInBacklog(false); + if (backlog == 0 || (dispatcher != null && dispatcher.isConsumerConnected() + && backlog < MINIMUM_BACKLOG_FOR_EXPIRY_CHECK && !topic.isOldestMessageExpired(cursor, messageTTLInSeconds))) { // don't do anything for almost caught-up connected subscriptions return false; @@ -1162,16 +1196,20 @@ public SubscriptionStatsImpl getStats(Boolean getPreciseBacklog, boolean subscri } else { subStats.backlogSize = -1; } - if (getEarliestTimeInBacklog && subStats.msgBacklog > 0) { - ManagedLedgerImpl managedLedger = ((ManagedLedgerImpl) cursor.getManagedLedger()); - PositionImpl markDeletedPosition = (PositionImpl) cursor.getMarkDeletedPosition(); - long result = 0; - try { - result = managedLedger.getEarliestMessagePublishTimeOfPos(markDeletedPosition).get(); - } catch (InterruptedException | ExecutionException e) { - result = -1; + if (getEarliestTimeInBacklog) { + if (subStats.msgBacklog > 0) { + ManagedLedgerImpl managedLedger = ((ManagedLedgerImpl) cursor.getManagedLedger()); + PositionImpl markDeletedPosition = (PositionImpl) cursor.getMarkDeletedPosition(); + long result = 0; + try { + result = managedLedger.getEarliestMessagePublishTimeOfPos(markDeletedPosition).get(); + } catch (InterruptedException | ExecutionException e) { + result = -1; + } + subStats.earliestMsgPublishTimeInBacklog = result; + } else { + subStats.earliestMsgPublishTimeInBacklog = -1; } - subStats.earliestMsgPublishTimeInBacklog = result; } subStats.msgBacklogNoDelayed = subStats.msgBacklog - subStats.msgDelayed; subStats.msgRateExpired = expiryMonitor.getMessageExpiryRate(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 0f5e60439810f..47e314bb3198b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service.persistent; +import static java.util.Objects.requireNonNull; import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.pulsar.broker.service.persistent.SubscribeRateLimiter.isSubscribeRateEnabled; import static org.apache.pulsar.common.naming.SystemTopicNames.isEventSystemTopic; @@ -81,6 +82,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.PulsarServerException; import 
org.apache.pulsar.broker.delayed.BucketDelayedDeliveryTrackerFactory; +import org.apache.pulsar.broker.delayed.DelayedDeliveryTrackerFactory; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateCompactionStrategy; import org.apache.pulsar.broker.namespace.NamespaceService; @@ -160,6 +162,7 @@ import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.SchemaData; +import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.common.topics.TopicCompactionStrategy; import org.apache.pulsar.common.util.Codec; @@ -192,6 +195,10 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal private final TopicName shadowSourceTopic; static final String DEDUPLICATION_CURSOR_NAME = "pulsar.dedup"; + + public static boolean isDedupCursorName(String name) { + return DEDUPLICATION_CURSOR_NAME.equals(name); + } private static final String TOPIC_EPOCH_PROPERTY_NAME = "pulsar.topic.epoch"; private static final double MESSAGE_EXPIRY_THRESHOLD = 1.5; @@ -210,7 +217,8 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal protected final MessageDeduplication messageDeduplication; private static final long COMPACTION_NEVER_RUN = -0xfebecffeL; - private CompletableFuture currentCompaction = CompletableFuture.completedFuture(COMPACTION_NEVER_RUN); + private volatile CompletableFuture currentCompaction = CompletableFuture.completedFuture( + COMPACTION_NEVER_RUN); private final CompactedTopic compactedTopic; // TODO: Create compaction strategy from topic policy when exposing strategic compaction to users. 
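The switch to a volatile currentCompaction above is worth a short illustration: the field is re-assigned inside the synchronized triggerCompaction() path but read from other threads (for example the unsubscribe path later in this patch, which chains compacted-ledger cleanup onto the last run). A compact sketch of that publication pattern follows; the names are hypothetical stand-ins for the broker internals, not the actual PersistentTopic members.

import java.util.concurrent.CompletableFuture;

final class CompactionHandleSketch {

    // Re-assigned whenever a new compaction starts; volatile so readers on other
    // threads always observe the latest future without holding the topic lock.
    private volatile CompletableFuture<Long> currentCompaction =
            CompletableFuture.completedFuture(-1L);

    synchronized void triggerCompaction(CompletableFuture<Long> newRun) {
        if (!currentCompaction.isDone()) {
            throw new IllegalStateException("Compaction already in progress");
        }
        currentCompaction = newRun; // published by the volatile write
    }

    // Called from a different thread: run cleanup after whatever compaction ran last,
    // treating a failed run the same as a finished one.
    CompletableFuture<Void> afterLastCompaction(Runnable cleanup) {
        return currentCompaction.handle((compactedLedgerId, ex) -> {
            cleanup.run();
            return (Void) null;
        });
    }
}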
@@ -293,7 +301,6 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS .build(); this.backloggedCursorThresholdEntries = brokerService.pulsar().getConfiguration().getManagedLedgerCursorBackloggedThreshold(); - registerTopicPolicyListener(); this.compactedTopic = new CompactedTopicImpl(brokerService.pulsar().getBookKeeperClient()); @@ -320,7 +327,8 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS checkReplicatedSubscriptionControllerState(); TopicName topicName = TopicName.get(topic); if (brokerService.getPulsar().getConfiguration().isTransactionCoordinatorEnabled() - && !isEventSystemTopic(topicName)) { + && !isEventSystemTopic(topicName) + && !NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { this.transactionBuffer = brokerService.getPulsar() .getTransactionBufferProvider().newTransactionBuffer(this); } else { @@ -350,8 +358,8 @@ public CompletableFuture initialize() { .thenAcceptAsync(optPolicies -> { if (!optPolicies.isPresent()) { isEncryptionRequired = false; - updatePublishDispatcher(); - updateResourceGroupLimiter(optPolicies); + updatePublishRateLimiter(); + updateResourceGroupLimiter(new Policies()); initializeDispatchRateLimiterIfNeeded(); updateSubscribeRateLimiter(); return; @@ -365,9 +373,9 @@ public CompletableFuture initialize() { updateSubscribeRateLimiter(); - updatePublishDispatcher(); + updatePublishRateLimiter(); - updateResourceGroupLimiter(optPolicies); + updateResourceGroupLimiter(policies); this.isEncryptionRequired = policies.encryption_required; @@ -488,7 +496,7 @@ private PersistentSubscription createPersistentSubscription(String subscriptionN } } - private static boolean isCompactionSubscription(String subscriptionName) { + public static boolean isCompactionSubscription(String subscriptionName) { return COMPACTION_SUBSCRIPTION.equals(subscriptionName); } @@ -818,7 +826,7 @@ private CompletableFuture internalSubscribe(final TransportCnx cnx, St } try { - if (!topic.endsWith(SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME) + if (!SystemTopicNames.isTopicPoliciesSystemTopic(topic) && !checkSubscriptionTypesEnable(subType)) { return FutureUtil.failedFuture( new NotAllowedException("Topic[{" + topic + "}] doesn't support " @@ -895,8 +903,8 @@ private CompletableFuture internalSubscribe(final TransportCnx cnx, St consumer.close(); } catch (BrokerServiceException e) { if (e instanceof ConsumerBusyException) { - log.warn("[{}][{}] Consumer {} {} already connected", - topic, subscriptionName, consumerId, consumerName); + log.warn("[{}][{}] Consumer {} {} already connected: {}", + topic, subscriptionName, consumerId, consumerName, e.getMessage()); } else if (e instanceof SubscriptionBusyException) { log.warn("[{}][{}] {}", topic, subscriptionName, e.getMessage()); } @@ -926,8 +934,8 @@ private CompletableFuture internalSubscribe(final TransportCnx cnx, St decrementUsageCount(); if (ex.getCause() instanceof ConsumerBusyException) { - log.warn("[{}][{}] Consumer {} {} already connected", topic, subscriptionName, consumerId, - consumerName); + log.warn("[{}][{}] Consumer {} {} already connected: {}", topic, subscriptionName, consumerId, + consumerName, ex.getCause().getMessage()); Consumer consumer = null; try { consumer = subscriptionFuture.isDone() ? 
getActiveConsumer(subscriptionFuture.get()) : null; @@ -1155,39 +1163,65 @@ public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { private void asyncDeleteCursorWithClearDelayedMessage(String subscriptionName, CompletableFuture unsubscribeFuture) { - if (!isDelayedDeliveryEnabled() - || !(brokerService.getDelayedDeliveryTrackerFactory() instanceof BucketDelayedDeliveryTrackerFactory)) { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + PersistentSubscription persistentSubscription = subscriptions.get(subscriptionName); + if (persistentSubscription == null) { + log.warn("[{}][{}] Can't find subscription, skip delete cursor", topic, subscriptionName); + unsubscribeFuture.complete(null); return; } - PersistentSubscription persistentSubscription = subscriptions.get(subscriptionName); - if (persistentSubscription == null) { - log.warn("[{}][{}] Can't find subscription, skip clear delayed message", topic, subscriptionName); - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + if (!isDelayedDeliveryEnabled() + || !(brokerService.getDelayedDeliveryTrackerFactory() instanceof BucketDelayedDeliveryTrackerFactory)) { + asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); return; } Dispatcher dispatcher = persistentSubscription.getDispatcher(); - final Dispatcher temporaryDispatcher; if (dispatcher == null) { - log.info("[{}][{}] Dispatcher is null, try to create temporary dispatcher to clear delayed message", topic, - subscriptionName); - dispatcher = temporaryDispatcher = - new PersistentDispatcherMultipleConsumers(this, persistentSubscription.cursor, - persistentSubscription); - } else { - temporaryDispatcher = null; + DelayedDeliveryTrackerFactory delayedDeliveryTrackerFactory = + brokerService.getDelayedDeliveryTrackerFactory(); + if (delayedDeliveryTrackerFactory instanceof BucketDelayedDeliveryTrackerFactory + bucketDelayedDeliveryTrackerFactory) { + ManagedCursor cursor = persistentSubscription.getCursor(); + bucketDelayedDeliveryTrackerFactory.cleanResidualSnapshots(cursor).whenComplete((__, ex) -> { + if (ex != null) { + unsubscribeFuture.completeExceptionally(ex); + } else { + asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); + } + }); + } + return; } dispatcher.clearDelayedMessages().whenComplete((__, ex) -> { if (ex != null) { unsubscribeFuture.completeExceptionally(ex); } else { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); } - if (temporaryDispatcher != null) { - temporaryDispatcher.close(); + }); + } + + private void asyncDeleteCursorWithCleanCompactionLedger(PersistentSubscription subscription, + CompletableFuture unsubscribeFuture) { + final String subscriptionName = subscription.getName(); + if ((!isCompactionSubscription(subscriptionName)) || !(subscription instanceof CompactorSubscription)) { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); + return; + } + + currentCompaction.handle((__, e) -> { + if (e != null) { + log.warn("[{}][{}] Last compaction task failed", topic, subscriptionName); + } + return ((CompactorSubscription) subscription).cleanCompactedLedger(); + }).whenComplete((__, ex) -> { + if (ex != null) { + log.error("[{}][{}] Error cleaning compacted ledger", topic, subscriptionName, ex); + unsubscribeFuture.completeExceptionally(ex); + } else { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); } }); } @@ -1503,7 +1537,7 @@ public void 
closeFailed(ManagedLedgerException exception, Object ctx) { } private void disposeTopic(CompletableFuture closeFuture) { - brokerService.removeTopicFromCache(topic) + brokerService.removeTopicFromCache(PersistentTopic.this) .thenRun(() -> { replicatedSubscriptionsController.ifPresent(ReplicatedSubscriptionsController::close); @@ -1526,7 +1560,6 @@ private void disposeTopic(CompletableFuture closeFuture) { CompletableFuture checkReplicationAndRetryOnFailure() { CompletableFuture result = new CompletableFuture(); checkReplication().thenAccept(res -> { - log.info("[{}] Policies updated successfully", topic); result.complete(null); }).exceptionally(th -> { log.error("[{}] Policies update failed {}, scheduled retry in {} seconds", topic, th.getMessage(), @@ -1546,7 +1579,8 @@ public CompletableFuture checkDeduplicationStatus() { return messageDeduplication.checkStatus(); } - private CompletableFuture checkPersistencePolicies() { + @VisibleForTesting + CompletableFuture checkPersistencePolicies() { TopicName topicName = TopicName.get(topic); CompletableFuture future = new CompletableFuture<>(); brokerService.getManagedLedgerConfig(topicName).thenAccept(config -> { @@ -1573,6 +1607,11 @@ public CompletableFuture checkReplication() { } List configuredClusters = topicPolicies.getReplicationClusters().get(); + if (CollectionUtils.isEmpty(configuredClusters)) { + log.warn("[{}] No replication clusters configured", name); + return CompletableFuture.completedFuture(null); + } + int newMessageTTLInSeconds = topicPolicies.getMessageTTLInSeconds().get(); String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); @@ -1647,11 +1686,11 @@ private CompletableFuture checkShadowReplication() { public void checkMessageExpiry() { int messageTtlInSeconds = topicPolicies.getMessageTTLInSeconds().get(); if (messageTtlInSeconds != 0) { - subscriptions.forEach((__, sub) -> sub.expireMessages(messageTtlInSeconds)); - replicators.forEach((__, replicator) - -> ((PersistentReplicator) replicator).expireMessages(messageTtlInSeconds)); - shadowReplicators.forEach((__, replicator) - -> ((PersistentReplicator) replicator).expireMessages(messageTtlInSeconds)); + subscriptions.forEach((__, sub) -> { + if (!isCompactionSubscription(sub.getName())) { + sub.expireMessages(messageTtlInSeconds); + } + }); } } @@ -2222,9 +2261,8 @@ public CompletableFuture asyncGetStats(boolean getPreciseBacklog if (producer.isRemote()) { remotePublishersStats.put(producer.getRemoteCluster(), publisherStats); - } else { - stats.addPublisher(publisherStats); } + stats.addPublisher(publisherStats); }); stats.averageMsgSize = stats.msgRateIn == 0.0 ? 
0.0 : (stats.msgThroughputIn / stats.msgRateIn); @@ -2285,7 +2323,7 @@ public CompletableFuture asyncGetStats(boolean getPreciseBacklog stats.backlogSize = ledger.getEstimatedBacklogSize(); stats.deduplicationStatus = messageDeduplication.getStatus().toString(); stats.topicEpoch = topicEpoch.orElse(null); - stats.ownerBroker = brokerService.pulsar().getLookupServiceAddress(); + stats.ownerBroker = brokerService.pulsar().getBrokerId(); stats.offloadedStorageSize = ledger.getOffloadedSize(); stats.lastOffloadLedgerId = ledger.getLastOffloadedLedgerId(); stats.lastOffloadSuccessTimeStamp = ledger.getLastOffloadedSuccessTimestamp(); @@ -2324,6 +2362,15 @@ private Optional getCompactorMXBean() { return Optional.ofNullable(compactor).map(c -> c.getStats()); } + @Override + public CompletableFuture deleteSchema() { + if (TopicName.get(getName()).isPartitioned()) { + // Only delete schema when partitioned metadata is deleting. + return CompletableFuture.completedFuture(null); + } + return brokerService.deleteSchema(TopicName.get(getName())); + } + @Override public CompletableFuture getInternalStats(boolean includeLedgerMetadata) { @@ -2627,7 +2674,7 @@ public void checkGC() { replCloseFuture.thenCompose(v -> delete(deleteMode == InactiveTopicDeleteMode.delete_when_no_subscriptions, deleteMode == InactiveTopicDeleteMode.delete_when_subscriptions_caught_up, false)) - .thenApply((res) -> tryToDeletePartitionedMetadata()) + .thenCompose((res) -> tryToDeletePartitionedMetadata()) .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic)) .exceptionally(e -> { if (e.getCause() instanceof TopicBusyException) { @@ -2635,6 +2682,8 @@ public void checkGC() { if (log.isDebugEnabled()) { log.debug("[{}] Did not delete busy topic: {}", topic, e.getCause().getMessage()); } + } else if (e.getCause() instanceof UnsupportedOperationException) { + log.info("[{}] Skip to delete partitioned topic: {}", topic, e.getCause().getMessage()); } else { log.warn("[{}] Inactive topic deletion failed", topic, e); } @@ -2679,7 +2728,7 @@ private CompletableFuture tryToDeletePartitionedMetadata() { .filter(topicExist -> topicExist) .findAny(); if (anyExistPartition.isPresent()) { - log.error("[{}] Delete topic metadata failed because" + log.info("[{}] Delete topic metadata failed because" + " another partition exist.", topicName); throw new UnsupportedOperationException( String.format("Another partition exists for [%s].", @@ -2709,7 +2758,9 @@ public void checkInactiveSubscriptions() { .toMillis(nsExpirationTime == null ? defaultExpirationTime : nsExpirationTime); if (expirationTimeMillis > 0) { subscriptions.forEach((subName, sub) -> { - if (sub.dispatcher != null && sub.dispatcher.isConsumerConnected() || sub.isReplicated()) { + if (sub.dispatcher != null && sub.dispatcher.isConsumerConnected() + || sub.isReplicated() + || isCompactionSubscription(subName)) { return; } if (System.currentTimeMillis() - sub.cursor.getLastActive() > expirationTimeMillis) { @@ -2783,7 +2834,8 @@ public void updateDispatchRateLimiter() { } @Override - public CompletableFuture onPoliciesUpdate(Policies data) { + public CompletableFuture onPoliciesUpdate(@Nonnull Policies data) { + requireNonNull(data); if (log.isDebugEnabled()) { log.debug("[{}] isEncryptionRequired changes: {} -> {}", topic, isEncryptionRequired, data.encryption_required); @@ -2793,39 +2845,60 @@ public CompletableFuture onPoliciesUpdate(Policies data) { return CompletableFuture.completedFuture(null); } + // Update props. 
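One change a few hunks above (in checkGC) replaces thenApply with thenCompose around tryToDeletePartitionedMetadata(); the difference is easy to miss. With thenApply the inner future is never awaited, so the following thenRun can log success before the partitioned metadata is actually gone, and any failure is silently dropped. A minimal, broker-independent illustration:

import java.util.concurrent.CompletableFuture;

final class ComposeVsApplySketch {

    static CompletableFuture<Void> deleteMetadata() {
        // Stand-in for an asynchronous deletion step.
        return CompletableFuture.runAsync(() -> { /* delete partitioned metadata */ });
    }

    public static void main(String[] args) {
        CompletableFuture<Void> deleteTopic = CompletableFuture.completedFuture(null);

        // Buggy shape: thenApply yields CompletableFuture<CompletableFuture<Void>>;
        // the outer stage completes as soon as deleteMetadata() *returns*.
        deleteTopic.thenApply(ignore -> deleteMetadata())
                .thenRun(() -> System.out.println("may print before the metadata is gone"));

        // Fixed shape: thenCompose flattens the nested future, so the next stage
        // waits for (and propagates failures from) the deletion itself.
        deleteTopic.thenCompose(ignore -> deleteMetadata())
                .thenRun(() -> System.out.println("runs only after the deletion completes"))
                .join();
    }
}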
+ // The component "EntryFilters" is update in the method "updateTopicPolicyByNamespacePolicy(data)". + // see more detail: https://github.com/apache/pulsar/pull/19364. updateTopicPolicyByNamespacePolicy(data); - + checkReplicatedSubscriptionControllerState(); isEncryptionRequired = data.encryption_required; - isAllowAutoUpdateSchema = data.is_allow_auto_update_schema; - updateDispatchRateLimiter(); - - updateSubscribeRateLimiter(); + // Apply policies for components. + List> applyPolicyTasks = applyUpdatedTopicPolicies(); + applyPolicyTasks.add(applyUpdatedNamespacePolicies(data)); + return FutureUtil.waitForAll(applyPolicyTasks) + .thenAccept(__ -> log.info("[{}] namespace-level policies updated successfully", topic)) + .exceptionally(ex -> { + log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); + throw FutureUtil.wrapToCompletionException(ex); + }); + } - updatePublishDispatcher(); + private CompletableFuture applyUpdatedNamespacePolicies(Policies namespaceLevelPolicies) { + return FutureUtil.runWithCurrentThread(() -> updateResourceGroupLimiter(namespaceLevelPolicies)); + } - this.updateResourceGroupLimiter(Optional.of(data)); + private List> applyUpdatedTopicPolicies() { + List> applyPoliciesFutureList = new ArrayList<>(); - List> producerCheckFutures = new ArrayList<>(producers.size()); - producers.values().forEach(producer -> producerCheckFutures.add( + // Client permission check. + subscriptions.forEach((subName, sub) -> { + sub.getConsumers().forEach(consumer -> applyPoliciesFutureList.add(consumer.checkPermissionsAsync())); + }); + producers.values().forEach(producer -> applyPoliciesFutureList.add( producer.checkPermissionsAsync().thenRun(producer::checkEncryption))); + // Check message expiry. + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkMessageExpiry())); - return FutureUtil.waitForAll(producerCheckFutures).thenCompose((__) -> { - return updateSubscriptionsDispatcherRateLimiter().thenCompose((___) -> { - replicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - checkMessageExpiry(); - CompletableFuture replicationFuture = checkReplicationAndRetryOnFailure(); - CompletableFuture dedupFuture = checkDeduplicationStatus(); - CompletableFuture persistentPoliciesFuture = checkPersistencePolicies(); - return CompletableFuture.allOf(replicationFuture, dedupFuture, persistentPoliciesFuture, - preCreateSubscriptionForCompactionIfNeeded()); - }); - }).exceptionally(ex -> { - log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); - throw FutureUtil.wrapToCompletionException(ex); - }); + // Update rate limiters. + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateDispatchRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateSubscribeRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updatePublishRateLimiter())); + + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateSubscriptionsDispatcherRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> replicators.forEach((name, replicator) -> replicator.updateRateLimiter()))); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()))); + + // Other components. 
+ applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkReplicationAndRetryOnFailure())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkDeduplicationStatus())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkPersistencePolicies())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> preCreateSubscriptionForCompactionIfNeeded())); + + return applyPoliciesFutureList; } /** @@ -3030,7 +3103,7 @@ public boolean isOldestMessageExpired(ManagedCursor cursor, int messageTTLInSeco // if AutoSkipNonRecoverableData is set to true, just return true here. return true; } else { - log.warn("[{}] Error while getting the oldest message", topic, e); + log.warn("[{}] [{}] Error while getting the oldest message", topic, cursor.toString(), e); } } finally { if (entry != null) { @@ -3159,17 +3232,29 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { public synchronized void triggerCompaction() throws PulsarServerException, AlreadyRunningException { if (currentCompaction.isDone()) { + if (!lock.readLock().tryLock()) { + log.info("[{}] Conflict topic-close, topic-delete, skip triggering compaction", topic); + return; + } + try { + if (isClosingOrDeleting) { + log.info("[{}] Topic is closing or deleting, skip triggering compaction", topic); + return; + } - if (strategicCompactionMap.containsKey(topic)) { - currentCompaction = brokerService.pulsar().getStrategicCompactor() - .compact(topic, strategicCompactionMap.get(topic)); - } else { - currentCompaction = brokerService.pulsar().getCompactor().compact(topic); + if (strategicCompactionMap.containsKey(topic)) { + currentCompaction = brokerService.pulsar().getStrategicCompactor() + .compact(topic, strategicCompactionMap.get(topic)); + } else { + currentCompaction = brokerService.pulsar().getCompactor().compact(topic); + } + } finally { + lock.readLock().unlock(); } currentCompaction.whenComplete((ignore, ex) -> { - if (ex != null){ - log.warn("[{}] Compaction failure.", topic, ex); - } + if (ex != null) { + log.warn("[{}] Compaction failure.", topic, ex); + } }); } else { throw new AlreadyRunningException("Compaction already in progress"); @@ -3249,7 +3334,7 @@ public CompletableFuture addSchemaIfIdleOrCheckCompatible(SchemaData schem .toList().size()) .sum(); if (hasSchema - || (!producers.isEmpty()) + || (userCreatedProducerCount > 0) || (numActiveConsumersWithoutAutoSchema != 0) || (ledger.getTotalSize() != 0)) { return checkSchemaCompatibleForConsumer(schema); @@ -3281,12 +3366,14 @@ private synchronized void checkReplicatedSubscriptionControllerState(boolean sho boolean isCurrentlyEnabled = replicatedSubscriptionsController.isPresent(); boolean isEnableReplicatedSubscriptions = brokerService.pulsar().getConfiguration().isEnableReplicatedSubscriptions(); + boolean replicationEnabled = this.topicPolicies.getReplicationClusters().get().size() > 1; - if (shouldBeEnabled && !isCurrentlyEnabled && isEnableReplicatedSubscriptions) { + if (shouldBeEnabled && !isCurrentlyEnabled && isEnableReplicatedSubscriptions && replicationEnabled) { log.info("[{}] Enabling replicated subscriptions controller", topic); replicatedSubscriptionsController = Optional.of(new ReplicatedSubscriptionsController(this, brokerService.pulsar().getConfiguration().getClusterName())); - } else if (isCurrentlyEnabled && !shouldBeEnabled || !isEnableReplicatedSubscriptions) { + } else if (isCurrentlyEnabled && !shouldBeEnabled || !isEnableReplicatedSubscriptions + || 
!replicationEnabled) { log.info("[{}] Disabled replicated subscriptions controller", topic); replicatedSubscriptionsController.ifPresent(ReplicatedSubscriptionsController::close); replicatedSubscriptionsController = Optional.empty(); @@ -3466,48 +3553,37 @@ public void onUpdate(TopicPolicies policies) { if (policies == null) { return; } + // Update props. + // The component "EntryFilters" is update in the method "updateTopicPolicy(data)". + // see more detail: https://github.com/apache/pulsar/pull/19364. updateTopicPolicy(policies); shadowTopics = policies.getShadowTopics(); - updateDispatchRateLimiter(); - updateSubscriptionsDispatcherRateLimiter().thenRun(() -> { - updatePublishDispatcher(); - updateSubscribeRateLimiter(); - replicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - checkMessageExpiry(); - checkReplicationAndRetryOnFailure(); - - checkDeduplicationStatus(); - - preCreateSubscriptionForCompactionIfNeeded(); + checkReplicatedSubscriptionControllerState(); - // update managed ledger config - checkPersistencePolicies(); - }).exceptionally(e -> { - Throwable t = e instanceof CompletionException ? e.getCause() : e; - log.error("[{}] update topic policy error: {}", topic, t.getMessage(), t); - return null; - }); + // Apply policies for components(not contains the specified policies which only defined in namespace policies). + FutureUtil.waitForAll(applyUpdatedTopicPolicies()) + .thenAccept(__ -> log.info("[{}] topic-level policies updated successfully", topic)) + .exceptionally(e -> { + Throwable t = FutureUtil.unwrapCompletionException(e); + log.error("[{}] update topic-level policy error: {}", topic, t.getMessage(), t); + return null; + }); } - private CompletableFuture updateSubscriptionsDispatcherRateLimiter() { - List> subscriptionCheckFutures = new ArrayList<>((int) subscriptions.size()); + private void updateSubscriptionsDispatcherRateLimiter() { subscriptions.forEach((subName, sub) -> { - List> consumerCheckFutures = new ArrayList<>(sub.getConsumers().size()); - sub.getConsumers().forEach(consumer -> consumerCheckFutures.add(consumer.checkPermissionsAsync())); - subscriptionCheckFutures.add(FutureUtil.waitForAll(consumerCheckFutures).thenRun(() -> { - Dispatcher dispatcher = sub.getDispatcher(); - if (dispatcher != null) { - dispatcher.updateRateLimiter(); - } - })); + Dispatcher dispatcher = sub.getDispatcher(); + if (dispatcher != null) { + dispatcher.updateRateLimiter(); + } }); - return FutureUtil.waitForAll(subscriptionCheckFutures); } protected CompletableFuture initTopicPolicy() { if (brokerService.pulsar().getConfig().isSystemTopicEnabled() && brokerService.pulsar().getConfig().isTopicLevelPoliciesEnabled()) { + brokerService.getPulsar().getTopicPoliciesService() + .registerListener(TopicName.getPartitionedTopicName(topic), this); return CompletableFuture.completedFuture(null).thenRunAsync(() -> onUpdate( brokerService.getPulsar().getTopicPoliciesService() .getTopicPoliciesIfExists(TopicName.getPartitionedTopicName(topic))), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java index 335f2cf8eec08..e011ed8d660f6 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java @@ -192,10 +192,14 @@ private void receiveSubscriptionUpdated(ReplicatedSubscriptionsUpdate update) { sub.acknowledgeMessage(Collections.singletonList(pos), AckType.Cumulative, Collections.emptyMap()); } else { // Subscription doesn't exist. We need to force the creation of the subscription in this cluster, because - log.info("[{}][{}] Creating subscription at {}:{} after receiving update from replicated subcription", + log.info("[{}][{}] Creating subscription at {}:{} after receiving update from replicated subscription", topic, update.getSubscriptionName(), updatedMessageId.getLedgerId(), pos); - topic.createSubscription(update.getSubscriptionName(), - InitialPosition.Latest, true /* replicateSubscriptionState */, null); + topic.createSubscription(update.getSubscriptionName(), InitialPosition.Earliest, + true /* replicateSubscriptionState */, Collections.emptyMap()) + .thenAccept(subscriptionCreated -> { + subscriptionCreated.acknowledgeMessage(Collections.singletonList(pos), + AckType.Cumulative, Collections.emptyMap()); + }); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java index fb306348bcdbb..b48f748bf5dbf 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java @@ -76,18 +76,6 @@ protected boolean replicateEntries(List entries) { continue; } - if (msg.isExpired(messageTTLInSeconds)) { - msgExpired.recordEvent(0 /* no value stat */); - if (log.isDebugEnabled()) { - log.debug("[{}] Discarding expired message at position {}, replicateTo {}", - replicatorId, entry.getPosition(), msg.getReplicateTo()); - } - cursor.asyncDelete(entry.getPosition(), this, entry.getPosition()); - entry.release(); - msg.recycle(); - continue; - } - if (STATE_UPDATER.get(this) != State.Started || isLocalMessageSkippedOnce) { // The producer is not ready yet after having stopped/restarted. Drop the message because it will // recovered when the producer is ready diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java index a052f1f6abf33..58f099a9dab80 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java @@ -28,6 +28,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.SubscribeRate; import org.apache.pulsar.common.util.RateLimiter; @@ -159,14 +160,21 @@ public void onSubscribeRateUpdate(SubscribeRate subscribeRate) { } /** - * Gets configured subscribe-rate from namespace policies. Returns null if subscribe-rate is not configured - * - * @return + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. 
*/ + @Deprecated public SubscribeRate getPoliciesSubscribeRate() { return getPoliciesSubscribeRate(brokerService, topicName); } + /** + * @deprecated Avoid using the deprecated method + * #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and blocking + * call. + */ + @Deprecated public static SubscribeRate getPoliciesSubscribeRate(BrokerService brokerService, final String topicName) { final String cluster = brokerService.pulsar().getConfiguration().getClusterName(); final Optional policies = DispatchRateLimiter.getPolicies(brokerService, topicName); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java index 395a8c9075eb3..720ae3c51891e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java @@ -18,13 +18,16 @@ */ package org.apache.pulsar.broker.service.persistent; +import java.util.List; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.plugin.EntryFilter; import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.EntryFilters; public class SystemTopic extends PersistentTopic { @@ -82,4 +85,14 @@ public boolean isEncryptionRequired() { // System topics are only written by the broker that can't know the encryption context. 
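The deprecation above steers callers away from the blocking, cache-based policies lookup. The non-blocking shape is to resolve the namespace policies asynchronously and derive the subscribe rate in a callback; the sketch below shows that shape with hypothetical types (PolicyStore and getPoliciesAsync are assumptions for illustration, not the exact NamespaceResources API).

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

final class AsyncSubscribeRateSketch {

    record SubscribeRate(int ratePerConsumer, int periodSeconds) { }

    record Policies(Optional<SubscribeRate> clusterSubscribeRate) { }

    // Hypothetical asynchronous policies source.
    interface PolicyStore {
        CompletableFuture<Optional<Policies>> getPoliciesAsync(String namespace);
    }

    // Callers receive a future and apply the configured rate, or fall back to the
    // "not configured" default, once it resolves; no thread is ever blocked.
    static CompletableFuture<Optional<SubscribeRate>> subscribeRateAsync(PolicyStore store,
                                                                         String namespace) {
        return store.getPoliciesAsync(namespace)
                .thenApply(optPolicies -> optPolicies.flatMap(Policies::clusterSubscribeRate));
    }
}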
return false; } + + @Override + public EntryFilters getEntryFiltersPolicy() { + return null; + } + + @Override + public List getEntryFilters() { + return null; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java index a8fc15f296598..c509764bf6710 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java @@ -472,18 +472,20 @@ private CompletableFuture updateSchemaLocator( .build() , locatorEntry.version ).thenApply(ignore -> nextVersion).whenComplete((__, ex) -> { - Throwable cause = FutureUtil.unwrapCompletionException(ex); - log.warn("[{}] Failed to update schema locator with position {}", schemaId, position, cause); - if (cause instanceof AlreadyExistsException || cause instanceof BadVersionException) { - bookKeeper.asyncDeleteLedger(position.getLedgerId(), new AsyncCallback.DeleteCallback() { - @Override - public void deleteComplete(int rc, Object ctx) { - if (rc != BKException.Code.OK) { - log.warn("[{}] Failed to delete ledger {} after updating schema locator failed, rc: {}", + if (ex != null) { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + log.warn("[{}] Failed to update schema locator with position {}", schemaId, position, cause); + if (cause instanceof AlreadyExistsException || cause instanceof BadVersionException) { + bookKeeper.asyncDeleteLedger(position.getLedgerId(), new AsyncCallback.DeleteCallback() { + @Override + public void deleteComplete(int rc, Object ctx) { + if (rc != BKException.Code.OK) { + log.warn("[{}] Failed to delete ledger {} after updating schema locator failed, rc: {}", schemaId, position.getLedgerId(), rc); + } } - } - }, null); + }, null); + } } }); } @@ -705,7 +707,8 @@ public static Exception bkException(String operation, int rc, long ledgerId, lon message += " - entry=" + entryId; } boolean recoverable = rc != BKException.Code.NoSuchLedgerExistsException - && rc != BKException.Code.NoSuchEntryException; + && rc != BKException.Code.NoSuchEntryException + && rc != BKException.Code.NoSuchLedgerExistsOnMetadataServerException; return new SchemaException(recoverable, message); } @@ -714,7 +717,8 @@ public static CompletableFuture ignoreUnrecoverableBKException(Completabl if (t.getCause() != null && (t.getCause() instanceof SchemaException) && !((SchemaException) t.getCause()).isRecoverable()) { - // Meeting NoSuchLedgerExistsException or NoSuchEntryException when reading schemas in + // Meeting NoSuchLedgerExistsException, NoSuchEntryException or + // NoSuchLedgerExistsOnMetadataServerException when reading schemas in // bookkeeper. This also means that the data has already been deleted by other operations // in deleting schema. 
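The BookkeeperSchemaStorage fix above is a reminder that whenComplete fires on success as well as failure: before the patch, the warning and the compensating ledger delete ran even when ex was null. The general shape of the fix, reduced to plain CompletableFuture:

import java.util.concurrent.CompletableFuture;

final class WhenCompleteSketch {

    static CompletableFuture<Long> updateLocator(CompletableFuture<Long> storeWrite) {
        return storeWrite.whenComplete((version, ex) -> {
            // whenComplete is invoked on both outcomes; only treat it as a failure
            // when an exception is actually present.
            if (ex != null) {
                System.err.println("update failed, cleaning up the orphaned ledger: " + ex);
                // ... compensating cleanup would go here ...
            }
        });
    }

    public static void main(String[] args) {
        // Successful write: no spurious warning, no cleanup.
        updateLocator(CompletableFuture.completedFuture(7L)).join();
    }
}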
if (log.isDebugEnabled()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java index 604265b554050..815778ecc9575 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.stats; +import static com.google.common.base.Preconditions.checkArgument; import static io.prometheus.client.CollectorRegistry.defaultRegistry; import io.prometheus.client.Collector; import io.prometheus.client.Summary; @@ -70,6 +71,7 @@ public DimensionStats(String name, long updateDurationInSec) { } public void recordDimensionTimeValue(long latency, TimeUnit unit) { + checkArgument(latency >= 0); summary.observe(unit.toMillis(latency)); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java index 9fe5588044d2f..d0dc4fe2a7e7d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java @@ -34,7 +34,7 @@ public class AggregatedNamespaceStats { public double throughputIn; public double throughputOut; - public long messageAckRate; + public double messageAckRate; public long bytesInCounter; public long msgInCounter; public long bytesOutCounter; @@ -64,7 +64,7 @@ public class AggregatedNamespaceStats { long compactionCompactedEntriesCount; long compactionCompactedEntriesSize; StatsBuckets compactionLatencyBuckets = new StatsBuckets(CompactionRecord.WRITE_LATENCY_BUCKETS_USEC); - int delayedMessageIndexSizeInBytes; + long delayedMessageIndexSizeInBytes; Map bucketDelayedIndexStats = new HashMap<>(); @@ -182,14 +182,34 @@ public void reset() { rateOut = 0; throughputIn = 0; throughputOut = 0; + messageAckRate = 0; + bytesInCounter = 0; + msgInCounter = 0; + + bytesOutCounter = 0; + msgOutCounter = 0; msgBacklog = 0; msgDelayed = 0; + ongoingTxnCount = 0; + abortedTxnCount = 0; + committedTxnCount = 0; + backlogQuotaLimit = 0; backlogQuotaLimitTime = -1; replicationStats.clear(); subscriptionStats.clear(); + + compactionRemovedEventCount = 0; + compactionSucceedCount = 0; + compactionFailedCount = 0; + compactionDurationTimeInMills = 0; + compactionReadThroughput = 0; + compactionWriteThroughput = 0; + compactionCompactedEntriesCount = 0; + compactionCompactedEntriesSize = 0; + delayedMessageIndexSizeInBytes = 0; bucketDelayedIndexStats.clear(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java index df107b6d7d9c4..93cbad4e19503 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java @@ -41,7 +41,11 @@ void writeSample(String metricName, Number value, String... 
labelsAndValuesArray SimpleTextOutputStream stream = initGaugeType(metricName); stream.write(metricName).write('{'); for (int i = 0; i < labelsAndValuesArray.length; i += 2) { - stream.write(labelsAndValuesArray[i]).write("=\"").write(labelsAndValuesArray[i + 1]).write('\"'); + String labelValue = labelsAndValuesArray[i + 1]; + if (labelValue != null) { + labelValue = labelValue.replace("\"", "\\\""); + } + stream.write(labelsAndValuesArray[i]).write("=\"").write(labelValue).write('\"'); if (i + 2 != labelsAndValuesArray.length) { stream.write(','); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java index 8ade2bc883f9a..c2816f5a2a013 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.stats.prometheus.metrics; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.stats.Counter; @@ -57,6 +58,7 @@ public void addCount(long delta) { @Override public void addLatency(long eventLatency, TimeUnit unit) { + checkArgument(eventLatency >= 0); long valueMillis = unit.toMillis(eventLatency); counter.add(valueMillis); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java index 3fd8921c15efa..b7cff2e08c2d0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java @@ -30,6 +30,8 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.TypedMessageBuilder; +import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.PulsarEvent; import org.apache.pulsar.common.naming.TopicName; @@ -41,13 +43,17 @@ */ public class TopicPoliciesSystemTopicClient extends SystemTopicClientBase { + static Schema avroSchema = DefaultImplementation.getDefaultImplementation() + .newAvroSchema(SchemaDefinition.builder().withPojo(PulsarEvent.class).build()); + public TopicPoliciesSystemTopicClient(PulsarClient client, TopicName topicName) { super(client, topicName); + } @Override protected CompletableFuture> newWriterAsyncInternal() { - return client.newProducer(Schema.AVRO(PulsarEvent.class)) + return client.newProducer(avroSchema) .topic(topicName.toString()) .enableBatching(false) .createAsync() @@ -61,7 +67,7 @@ protected CompletableFuture> newWriterAsyncInternal() { @Override protected CompletableFuture> newReaderAsyncInternal() { - return client.newReader(Schema.AVRO(PulsarEvent.class)) + return client.newReader(avroSchema) .topic(topicName.toString()) .startMessageId(MessageId.earliest) .readCompacted(true) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/SnapshotSegmentAbortedTxnProcessorImpl.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/SnapshotSegmentAbortedTxnProcessorImpl.java index 751c03aff95a9..4f85270be95c3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/SnapshotSegmentAbortedTxnProcessorImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/SnapshotSegmentAbortedTxnProcessorImpl.java @@ -47,6 +47,7 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.systopic.SystemTopicClient; import org.apache.pulsar.broker.transaction.buffer.AbortedTxnProcessor; +import org.apache.pulsar.broker.transaction.buffer.metadata.TransactionBufferSnapshot; import org.apache.pulsar.broker.transaction.buffer.metadata.v2.TransactionBufferSnapshotIndex; import org.apache.pulsar.broker.transaction.buffer.metadata.v2.TransactionBufferSnapshotIndexes; import org.apache.pulsar.broker.transaction.buffer.metadata.v2.TransactionBufferSnapshotIndexesMetadata; @@ -57,6 +58,7 @@ import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; @@ -265,7 +267,7 @@ public CompletableFuture recoverFromSnapshot() { PositionImpl finalStartReadCursorPosition = startReadCursorPosition; TransactionBufferSnapshotIndexes finalPersistentSnapshotIndexes = persistentSnapshotIndexes; if (persistentSnapshotIndexes == null) { - return CompletableFuture.completedFuture(null); + return recoverOldSnapshot(); } else { this.unsealedTxnIds = convertTypeToTxnID(persistentSnapshotIndexes .getSnapshot().getAborts()); @@ -320,6 +322,12 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { hasInvalidIndex.set(true); } } + + @Override + public String toString() { + return String.format("Transaction buffer [%s] recover from snapshot", + SnapshotSegmentAbortedTxnProcessorImpl.this.topic.getName()); + } }, null); }); openManagedLedgerAndHandleSegmentsFuture.complete(null); @@ -378,6 +386,71 @@ public void openReadOnlyManagedLedgerFailed(ManagedLedgerException exception, Ob .getExecutor(this)); } + // This method will be deprecated and removed in version 4.x.0 + private CompletableFuture recoverOldSnapshot() { + return topic.getBrokerService().getPulsar().getPulsarResources().getTopicResources() + .listPersistentTopicsAsync(NamespaceName.get(TopicName.get(topic.getName()).getNamespace())) + .thenCompose(topics -> { + if (!topics.contains(TopicDomain.persistent + "://" + + TopicName.get(topic.getName()).getNamespace() + "/" + + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT)) { + return CompletableFuture.completedFuture(null); + } else { + return topic.getBrokerService().getPulsar().getTransactionBufferSnapshotServiceFactory() + .getTxnBufferSnapshotService() + .createReader(TopicName.get(topic.getName())).thenComposeAsync(snapshotReader -> { + PositionImpl startReadCursorPositionInOldSnapshot = null; + try { + while (snapshotReader.hasMoreEvents()) { + Message message = snapshotReader.readNextAsync() + .get(getSystemClientOperationTimeoutMs(), TimeUnit.MILLISECONDS); + if (topic.getName().equals(message.getKey())) { + TransactionBufferSnapshot transactionBufferSnapshot = + message.getValue(); + if (transactionBufferSnapshot != null) { + 
handleOldSnapshot(transactionBufferSnapshot); + startReadCursorPositionInOldSnapshot = PositionImpl.get( + transactionBufferSnapshot.getMaxReadPositionLedgerId(), + transactionBufferSnapshot.getMaxReadPositionEntryId()); + } + } + } + } catch (TimeoutException ex) { + Throwable t = FutureUtil.unwrapCompletionException(ex); + String errorMessage = String.format("[%s] Transaction buffer recover fail by " + + "read transactionBufferSnapshot timeout!", topic.getName()); + log.error(errorMessage, t); + return FutureUtil.failedFuture(new BrokerServiceException + .ServiceUnitNotReadyException(errorMessage, t)); + } catch (Exception ex) { + log.error("[{}] Transaction buffer recover fail when read " + + "transactionBufferSnapshot!", topic.getName(), ex); + return FutureUtil.failedFuture(ex); + } finally { + assert snapshotReader != null; + closeReader(snapshotReader); + } + return CompletableFuture.completedFuture(startReadCursorPositionInOldSnapshot); + }, + topic.getBrokerService().getPulsar().getTransactionExecutorProvider() + .getExecutor(this)); + } + }); + } + + // This method will be deprecated and removed in version 4.x.0 + private void handleOldSnapshot(TransactionBufferSnapshot snapshot) { + if (snapshot.getAborts() != null) { + snapshot.getAborts().forEach(abortTxnMetadata -> { + TxnID txnID = new TxnID(abortTxnMetadata.getTxnIdMostBits(), + abortTxnMetadata.getTxnIdLeastBits()); + aborts.put(txnID, txnID); + //The old data will be written into the first segment. + unsealedTxnIds.add(txnID); + }); + } + } + @Override public CompletableFuture clearAbortedTxnSnapshot() { return persistentWorker.appendTask(PersistentWorker.OperationType.Clear, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java index 89a8e95afba1f..b5e4b6d0f901a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java @@ -37,6 +37,7 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.map.LinkedMap; import org.apache.pulsar.broker.service.BrokerServiceException; @@ -165,13 +166,15 @@ public void handleTxnEntry(Entry entry) { if (msgMetadata != null && msgMetadata.hasTxnidMostBits() && msgMetadata.hasTxnidLeastBits()) { TxnID txnID = new TxnID(msgMetadata.getTxnidMostBits(), msgMetadata.getTxnidLeastBits()); PositionImpl position = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); - if (Markers.isTxnMarker(msgMetadata)) { - if (Markers.isTxnAbortMarker(msgMetadata)) { - snapshotAbortedTxnProcessor.putAbortedTxnAndPosition(txnID, position); + synchronized (TopicTransactionBuffer.this) { + if (Markers.isTxnMarker(msgMetadata)) { + if (Markers.isTxnAbortMarker(msgMetadata)) { + snapshotAbortedTxnProcessor.putAbortedTxnAndPosition(txnID, position); + } + updateMaxReadPosition(txnID); + } else { + handleTransactionMessage(txnID, position); } - updateMaxReadPosition(txnID); - } else { - handleTransactionMessage(txnID, position); } } } @@ -282,8 +285,8 @@ private void handleTransactionMessage(TxnID txnId, Position 
position) { .checkAbortedTransaction(txnId)) { ongoingTxns.put(txnId, (PositionImpl) position); PositionImpl firstPosition = ongoingTxns.get(ongoingTxns.firstKey()); - //max read position is less than first ongoing transaction message position, so entryId -1 - maxReadPosition = PositionImpl.get(firstPosition.getLedgerId(), firstPosition.getEntryId() - 1); + // max read position is less than first ongoing transaction message position + maxReadPosition = ((ManagedLedgerImpl) topic.getManagedLedger()).getPreviousPosition(firstPosition); } } @@ -357,10 +360,10 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { updateMaxReadPosition(txnID); snapshotAbortedTxnProcessor.trimExpiredAbortedTxns(); takeSnapshotByChangeTimes(); + txnAbortedCounter.increment(); + completableFuture.complete(null); + handleLowWaterMark(txnID, lowWaterMark); } - txnAbortedCounter.increment(); - completableFuture.complete(null); - handleLowWaterMark(txnID, lowWaterMark); } @Override @@ -469,7 +472,7 @@ public CompletableFuture closeAsync() { } @Override - public boolean isTxnAborted(TxnID txnID, PositionImpl readPosition) { + public synchronized boolean isTxnAborted(TxnID txnID, PositionImpl readPosition) { return snapshotAbortedTxnProcessor.checkAbortedTransaction(txnID); } @@ -501,9 +504,11 @@ public PositionImpl getMaxReadPosition() { @Override public TransactionInBufferStats getTransactionInBufferStats(TxnID txnID) { TransactionInBufferStats transactionInBufferStats = new TransactionInBufferStats(); - transactionInBufferStats.aborted = isTxnAborted(txnID, null); - if (ongoingTxns.containsKey(txnID)) { - transactionInBufferStats.startPosition = ongoingTxns.get(txnID).toString(); + synchronized (this) { + transactionInBufferStats.aborted = isTxnAborted(txnID, null); + if (ongoingTxns.containsKey(txnID)) { + transactionInBufferStats.startPosition = ongoingTxns.get(txnID).toString(); + } } return transactionInBufferStats; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java index 48dcf259edb1b..9aac9ab64d0fd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java @@ -61,6 +61,8 @@ public class TransactionBufferHandlerImpl implements TransactionBufferHandler { private final PulsarService pulsarService; private final PulsarClientImpl pulsarClient; + private final int randomKeyForSelectConnection; + private static final AtomicIntegerFieldUpdater REQUEST_CREDITS_UPDATER = AtomicIntegerFieldUpdater.newUpdater(TransactionBufferHandlerImpl.class, "requestCredits"); private volatile int requestCredits; @@ -74,6 +76,7 @@ public TransactionBufferHandlerImpl(PulsarService pulsarService, HashedWheelTime this.operationTimeoutInMills = operationTimeoutInMills; this.timer = timer; this.requestCredits = Math.max(100, maxConcurrentRequests); + this.randomKeyForSelectConnection = pulsarClient.getCnxPool().genRandomKeyToSelectCon(); } @Override @@ -134,8 +137,9 @@ public void endTxn(OpRequestSend op) { if (clientCnx.ctx().channel().isActive()) { clientCnx.registerTransactionBufferHandler(TransactionBufferHandlerImpl.this); outstandingRequests.put(op.requestId, op); + final long requestId = op.requestId; timer.newTimeout(timeout -> { - OpRequestSend 
peek = outstandingRequests.remove(op.requestId); + OpRequestSend peek = outstandingRequests.remove(requestId); if (peek != null && !peek.cb.isDone() && !peek.cb.isCompletedExceptionally()) { peek.cb.completeExceptionally(new TransactionBufferClientException .RequestTimeoutException()); @@ -296,7 +300,7 @@ protected OpRequestSend newObject(Handle handle) { } public CompletableFuture getClientCnxWithLookup(String topic) { - return pulsarClient.getConnection(topic); + return pulsarClient.getConnection(topic, randomKeyForSelectConnection); } public CompletableFuture getClientCnx(String topic) { @@ -317,7 +321,8 @@ public CompletableFuture getClientCnx(String topic) { } InetSocketAddress brokerAddress = InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort()); - return pulsarClient.getConnection(brokerAddress, brokerAddress); + return pulsarClient.getConnection(brokerAddress, brokerAddress, + randomKeyForSelectConnection); } else { // Bundle is unloading, lookup topic return getClientCnxWithLookup(topic); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java index ecc6599ce52b5..5308648b80c1d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java @@ -159,7 +159,7 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { , originPersistentTopic.getName(), subscription.getName(), exception); pendingAckStoreFuture.completeExceptionally(exception); } - }, () -> true, null); + }, () -> CompletableFuture.completedFuture(true), null); }).exceptionally(e -> { Throwable t = FutureUtil.unwrapCompletionException(e); log.error("[{}] [{}] Failed to get managedLedger config when init pending ack store!", diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java index 3d23d7812543a..c2ac73d39f559 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java @@ -56,7 +56,9 @@ import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationParameters; import org.apache.pulsar.broker.authorization.AuthorizationService; +import org.apache.pulsar.broker.loadbalance.LoadManager; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.namespace.LookupOptions; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.resources.BookieResources; @@ -92,6 +94,7 @@ import org.apache.pulsar.common.policies.path.PolicyPath; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.coordination.LockManager; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import org.slf4j.Logger; @@ -528,6 +531,7 @@ protected static CompletableFuture getClusterDataIfDifferentCluster 
pulsar.getPulsarResources().getClusterResources().getClusterAsync(cluster) .whenComplete((clusterDataResult, ex) -> { if (ex != null) { + log.warn("[{}] Load cluster data failed: requested={}", clientAppId, cluster); clusterDataFuture.completeExceptionally(FutureUtil.unwrapCompletionException(ex)); return; } @@ -717,19 +721,19 @@ public CompletableFuture validateBundleOwnershipAsync(NamespaceBundle bund throw new RestException(Status.PRECONDITION_FAILED, "Failed to find ownership for ServiceUnit:" + bundle.toString()); } - // If the load manager is extensible load manager, we don't need check the authoritative. - if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config())) { - return CompletableFuture.completedFuture(null); - } return nsService.isServiceUnitOwnedAsync(bundle) .thenAccept(owned -> { if (!owned) { boolean newAuthoritative = this.isLeaderBroker(); // Replace the host and port of the current request and redirect - URI redirect = UriBuilder.fromUri(uri.getRequestUri()).host(webUrl.get().getHost()) - .port(webUrl.get().getPort()).replaceQueryParam("authoritative", - newAuthoritative).replaceQueryParam("destinationBroker", - null).build(); + UriBuilder uriBuilder = UriBuilder.fromUri(uri.getRequestUri()) + .host(webUrl.get().getHost()) + .port(webUrl.get().getPort()) + .replaceQueryParam("authoritative", newAuthoritative); + if (!ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config())) { + uriBuilder.replaceQueryParam("destinationBroker", null); + } + URI redirect = uriBuilder.build(); log.debug("{} is not a service unit owned", bundle); // Redirect log.debug("Redirecting the rest call to {}", redirect); @@ -1198,24 +1202,43 @@ protected CompletableFuture canUpdateCluster(String tenant, Set ol /** * Redirect the call to the specified broker. 
* - * @param broker - * Broker name + * @param brokerId broker's id (lookup service address) */ - protected void validateBrokerName(String broker) { - String brokerUrl = String.format("http://%s", broker); - String brokerUrlTls = String.format("https://%s", broker); - if (!brokerUrl.equals(pulsar().getSafeWebServiceAddress()) - && !brokerUrlTls.equals(pulsar().getWebServiceAddressTls())) { - String[] parts = broker.split(":"); - checkArgument(parts.length == 2, String.format("Invalid broker url %s", broker)); - String host = parts[0]; - int port = Integer.parseInt(parts[1]); - - URI redirect = UriBuilder.fromUri(uri.getRequestUri()).host(host).port(port).build(); - log.debug("[{}] Redirecting the rest call to {}: broker={}", clientAppId(), redirect, broker); - throw new WebApplicationException(Response.temporaryRedirect(redirect).build()); - + protected CompletableFuture maybeRedirectToBroker(String brokerId) { + // backwards compatibility + String cleanedBrokerId = brokerId.replaceFirst("http[s]?://", ""); + if (pulsar.getBrokerId().equals(cleanedBrokerId) + // backwards compatibility + || ("http://" + cleanedBrokerId).equals(pulsar().getWebServiceAddress()) + || ("https://" + cleanedBrokerId).equals(pulsar().getWebServiceAddressTls())) { + // no need to redirect, the current broker matches the given broker id + return CompletableFuture.completedFuture(null); } + LockManager brokerLookupDataLockManager = + pulsar().getCoordinationService().getLockManager(BrokerLookupData.class); + return brokerLookupDataLockManager.readLock(LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + cleanedBrokerId) + .thenAccept(brokerLookupDataOptional -> { + if (brokerLookupDataOptional.isEmpty()) { + throw new RestException(Status.NOT_FOUND, + "Broker id '" + brokerId + "' not found in available brokers."); + } + brokerLookupDataOptional.ifPresent(brokerLookupData -> { + URI targetBrokerUri; + if ((isRequestHttps() || StringUtils.isBlank(brokerLookupData.getWebServiceUrl())) + && StringUtils.isNotBlank(brokerLookupData.getWebServiceUrlTls())) { + targetBrokerUri = URI.create(brokerLookupData.getWebServiceUrlTls()); + } else { + targetBrokerUri = URI.create(brokerLookupData.getWebServiceUrl()); + } + URI redirect = UriBuilder.fromUri(uri.getRequestUri()) + .scheme(targetBrokerUri.getScheme()) + .host(targetBrokerUri.getHost()) + .port(targetBrokerUri.getPort()).build(); + log.debug("[{}] Redirecting the rest call to {}: broker={}", clientAppId(), redirect, + cleanedBrokerId); + throw new WebApplicationException(Response.temporaryRedirect(redirect).build()); + }); + }); } public void validateTopicPolicyOperation(TopicName topicName, PolicyName policy, PolicyOperation operation) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java index 4d718f71a2e23..875c30f0f5e13 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java @@ -42,10 +42,20 @@ public class RawBatchConverter { public static boolean isReadableBatch(RawMessage msg) { ByteBuf payload = msg.getHeadersAndPayload(); MessageMetadata metadata = Commands.parseMessageMetadata(payload); + return isReadableBatch(metadata); + } + + public static boolean isReadableBatch(MessageMetadata metadata) { return metadata.hasNumMessagesInBatch() && metadata.getEncryptionKeysCount() == 0; } public static List> 
extractIdsAndKeysAndSize(RawMessage msg) + throws IOException { + return extractIdsAndKeysAndSize(msg, true); + } + + public static List> extractIdsAndKeysAndSize(RawMessage msg, + boolean extractNullKey) throws IOException { checkArgument(msg.getMessageIdData().getBatchIndex() == -1); @@ -69,7 +79,7 @@ public static List> extractIdsAndKey msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i); - if (!smm.isCompactedOut()) { + if (!smm.isCompactedOut() && (extractNullKey || smm.hasPartitionKey())) { idsAndKeysAndSize.add(ImmutableTriple.of(id, smm.hasPartitionKey() ? smm.getPartitionKey() : null, smm.hasPayloadSize() ? smm.getPayloadSize() : 0)); @@ -80,6 +90,11 @@ public static List> extractIdsAndKey return idsAndKeysAndSize; } + public static Optional rebatchMessage(RawMessage msg, + BiPredicate filter) throws IOException { + return rebatchMessage(msg, filter, true); + } + /** * Take a batched message and a filter, and returns a message with the only the sub-messages * which match the filter. Returns an empty optional if no messages match. @@ -87,7 +102,8 @@ public static List> extractIdsAndKey * NOTE: this message does not alter the reference count of the RawMessage argument. */ public static Optional rebatchMessage(RawMessage msg, - BiPredicate filter) + BiPredicate filter, + boolean retainNullKey) throws IOException { checkArgument(msg.getMessageIdData().getBatchIndex() == -1); @@ -114,10 +130,19 @@ public static Optional rebatchMessage(RawMessage msg, msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i); - if (!singleMessageMetadata.hasPartitionKey()) { - messagesRetained++; - Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, - singleMessagePayload, batchBuffer); + if (singleMessageMetadata.isCompactedOut()) { + // we may read compacted out message from the compacted topic + Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, + Unpooled.EMPTY_BUFFER, batchBuffer); + } else if (!singleMessageMetadata.hasPartitionKey()) { + if (retainNullKey) { + messagesRetained++; + Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, + singleMessagePayload, batchBuffer); + } else { + Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, + Unpooled.EMPTY_BUFFER, batchBuffer); + } } else if (filter.test(singleMessageMetadata.getPartitionKey(), id) && singleMessagePayload.readableBytes() > 0) { messagesRetained++; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java index cf6b213155cd7..374f1e30c0a89 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.MessageCrypto; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.crypto.MessageCryptoBc; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; @@ -44,17 +45,17 @@ * [(k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3)] */ public class RawBatchMessageContainerImpl extends BatchMessageContainerImpl { - MessageCrypto msgCrypto; - Set encryptionKeys; - CryptoKeyReader 
cryptoKeyReader; - public RawBatchMessageContainerImpl(int maxNumMessagesInBatch) { + private MessageCrypto msgCrypto; + private Set encryptionKeys; + private CryptoKeyReader cryptoKeyReader; + private MessageIdAdv lastAddedMessageId; + + public RawBatchMessageContainerImpl() { super(); - compressionType = CompressionType.NONE; - compressor = new CompressionCodecNone(); - if (maxNumMessagesInBatch > 0) { - this.maxNumMessagesInBatch = maxNumMessagesInBatch; - } + this.compressionType = CompressionType.NONE; + this.compressor = new CompressionCodecNone(); } + private ByteBuf encrypt(ByteBuf compressedPayload) { if (msgCrypto == null) { return compressedPayload; @@ -90,6 +91,28 @@ public void setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) { this.cryptoKeyReader = cryptoKeyReader; } + @Override + public boolean add(MessageImpl msg, SendCallback callback) { + this.lastAddedMessageId = (MessageIdAdv) msg.getMessageId(); + return super.add(msg, callback); + } + + @Override + protected boolean isBatchFull() { + return false; + } + + @Override + public boolean haveEnoughSpace(MessageImpl msg) { + if (lastAddedMessageId == null) { + return true; + } + // Keep same batch compact to same batch. + MessageIdAdv msgId = (MessageIdAdv) msg.getMessageId(); + return msgId.getLedgerId() == lastAddedMessageId.getLedgerId() + && msgId.getEntryId() == lastAddedMessageId.getEntryId(); + } + /** * Serializes the batched messages and return the ByteBuf. * It sets the CompressionType and Encryption Keys from the batched messages. @@ -164,8 +187,15 @@ public ByteBuf toByteBuf() { idData.writeTo(buf); buf.writeInt(metadataAndPayload.readableBytes()); buf.writeBytes(metadataAndPayload); + metadataAndPayload.release(); encryptedPayload.release(); clear(); return buf; } + + @Override + public void clear() { + this.lastAddedMessageId = null; + super.clear(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java index 9a1c972b2cc98..f65232413991f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java @@ -59,6 +59,7 @@ public RawReaderImpl(PulsarClientImpl client, String topic, String subscription, consumerConfiguration.setReceiverQueueSize(DEFAULT_RECEIVER_QUEUE_SIZE); consumerConfiguration.setReadCompacted(true); consumerConfiguration.setSubscriptionInitialPosition(SubscriptionInitialPosition.Earliest); + consumerConfiguration.setAckReceiptEnabled(true); consumer = new RawConsumerImpl(client, consumerConfiguration, consumerFuture); @@ -122,7 +123,7 @@ static class RawConsumerImpl extends ConsumerImpl { MessageId.earliest, 0 /* startMessageRollbackDurationInSec */, Schema.BYTES, null, - true + false ); incomingRawMessages = new GrowableArrayBlockingQueue<>(); pendingRawReceives = new ConcurrentLinkedQueue<>(); @@ -151,14 +152,13 @@ void tryCompletePending() { // TODO message validation numMsg = 1; } + MessageIdData messageId = messageAndCnx.msg.getMessageIdData(); + lastDequeuedMessageId = new BatchMessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), + messageId.getPartition(), numMsg - 1); if (!future.complete(messageAndCnx.msg)) { messageAndCnx.msg.close(); closeAsync(); } - MessageIdData messageId = messageAndCnx.msg.getMessageIdData(); - lastDequeuedMessageId = new BatchMessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), - messageId.getPartition(), numMsg - 
1); - ClientCnx currentCnx = cnx(); if (currentCnx == messageAndCnx.cnx) { increaseAvailablePermits(currentCnx, numMsg); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java index e1a10b3bbb212..015df8582a2e2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java @@ -24,13 +24,16 @@ import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.Consumer; public interface CompactedTopic { CompletableFuture newCompactedLedger(Position p, long compactedLedgerId); CompletableFuture deleteCompactedLedger(long compactedLedgerId); void asyncReadEntriesOrWait(ManagedCursor cursor, - int numberOfEntriesToRead, + int maxEntries, + long bytesToRead, + PositionImpl maxReadPosition, boolean isFirstRead, ReadEntriesCallback callback, Consumer consumer); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java index c8114f9adb652..2252065413559 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java @@ -32,6 +32,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import javax.annotation.Nullable; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerEntry; @@ -42,6 +43,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.EntryImpl; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.Consumer; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherSingleActiveConsumer.ReadEntriesCtx; @@ -87,7 +89,9 @@ public CompletableFuture deleteCompactedLedger(long compactedLedgerId) { @Override public void asyncReadEntriesOrWait(ManagedCursor cursor, - int numberOfEntriesToRead, + int maxEntries, + long bytesToRead, + PositionImpl maxReadPosition, boolean isFirstRead, ReadEntriesCallback callback, Consumer consumer) { synchronized (this) { @@ -102,8 +106,11 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, ReadEntriesCtx readEntriesCtx = ReadEntriesCtx.create(consumer, DEFAULT_CONSUMER_EPOCH); if (compactionHorizon == null || compactionHorizon.compareTo(cursorPosition) < 0) { - cursor.asyncReadEntriesOrWait(numberOfEntriesToRead, callback, readEntriesCtx, PositionImpl.LATEST); + cursor.asyncReadEntriesOrWait(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition); } else { + ManagedCursorImpl managedCursor = (ManagedCursorImpl) cursor; + int numberOfEntriesToRead = managedCursor.applyMaxSizeCap(maxEntries, bytesToRead); + compactedTopicContext.thenCompose( (context) -> findStartPoint(cursorPosition, context.ledger.getLastAddConfirmed(), context.cache) .thenCompose((startPoint) -> { @@ -120,7 +127,7 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, return 
CompletableFuture.completedFuture(null); } else { long endPoint = Math.min(context.ledger.getLastAddConfirmed(), - startPoint + numberOfEntriesToRead); + startPoint + (numberOfEntriesToRead - 1)); if (startPoint == NEWER_THAN_COMPACTED) { cursor.seek(compactionHorizon.getNext()); callback.readEntriesComplete(Collections.emptyList(), readEntriesCtx); @@ -128,6 +135,12 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, } return readEntries(context.ledger, startPoint, endPoint) .thenAccept((entries) -> { + long entriesSize = 0; + for (Entry entry : entries) { + entriesSize += entry.getLength(); + } + managedCursor.updateReadStats(entries.size(), entriesSize); + Entry lastEntry = entries.get(entries.size() - 1); // The compaction task depends on the last snapshot and the incremental // entries to build the new snapshot. So for the compaction cursor, we @@ -320,6 +333,16 @@ private static int comparePositionAndMessageId(PositionImpl p, MessageIdData m) public synchronized Optional getCompactionHorizon() { return Optional.ofNullable(this.compactionHorizon); } + + public void reset() { + this.compactionHorizon = null; + this.compactedTopicContext = null; + } + + @Nullable + public CompletableFuture getCompactedTopicContextFuture() { + return compactedTopicContext; + } private static final Logger log = LoggerFactory.getLogger(CompactedTopicImpl.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java index 37b03e275d6bf..fefa2ee959cc5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java @@ -69,20 +69,12 @@ public class StrategicTwoPhaseCompactor extends TwoPhaseCompactor { public StrategicTwoPhaseCompactor(ServiceConfiguration conf, PulsarClient pulsar, BookKeeper bk, - ScheduledExecutorService scheduler, - int maxNumMessagesInBatch) { + ScheduledExecutorService scheduler) { super(conf, pulsar, bk, scheduler); - batchMessageContainer = new RawBatchMessageContainerImpl(maxNumMessagesInBatch); + batchMessageContainer = new RawBatchMessageContainerImpl(); phaseOneLoopReadTimeout = Duration.ofSeconds(conf.getBrokerServiceCompactionPhaseOneLoopTimeInSeconds()); } - public StrategicTwoPhaseCompactor(ServiceConfiguration conf, - PulsarClient pulsar, - BookKeeper bk, - ScheduledExecutorService scheduler) { - this(conf, pulsar, bk, scheduler, -1); - } - public CompletableFuture compact(String topic) { throw new UnsupportedOperationException(); } @@ -366,7 +358,7 @@ private CompletableFuture runPhaseTwo( }); }) .thenCompose(v -> { - log.info("Acking ledger id {}", phaseOneResult.firstId); + log.info("Acking ledger id {}", phaseOneResult.lastId); return ((CompactionReaderImpl) reader) .acknowledgeCumulativeAsync( phaseOneResult.lastId, Map.of(COMPACTED_TOPIC_LEDGER_PROPERTY, @@ -405,7 +397,6 @@ private void phaseTwoLoop(String topic, Iterator> reader, .whenComplete((res, exception2) -> { if (exception2 != null) { promise.completeExceptionally(exception2); - return; } }); phaseTwoLoop(topic, reader, lh, outstanding, promise); @@ -430,35 +421,45 @@ private void phaseTwoLoop(String topic, Iterator> reader, CompletableFuture addToCompactedLedger( LedgerHandle lh, Message m, String topic, Semaphore outstanding) { + if (m == null) { + return flushBatchMessage(lh, topic, outstanding); + } + if 
(batchMessageContainer.haveEnoughSpace((MessageImpl) m)) { + batchMessageContainer.add((MessageImpl) m, null); + return CompletableFuture.completedFuture(false); + } + CompletableFuture f = flushBatchMessage(lh, topic, outstanding); + batchMessageContainer.add((MessageImpl) m, null); + return f; + } + + private CompletableFuture flushBatchMessage(LedgerHandle lh, String topic, + Semaphore outstanding) { + if (batchMessageContainer.getNumMessagesInBatch() <= 0) { + return CompletableFuture.completedFuture(false); + } CompletableFuture bkf = new CompletableFuture<>(); - if (m == null || batchMessageContainer.add((MessageImpl) m, null)) { - if (batchMessageContainer.getNumMessagesInBatch() > 0) { - try { - ByteBuf serialized = batchMessageContainer.toByteBuf(); - outstanding.acquire(); - mxBean.addCompactionWriteOp(topic, serialized.readableBytes()); - long start = System.nanoTime(); - lh.asyncAddEntry(serialized, - (rc, ledger, eid, ctx) -> { - outstanding.release(); - mxBean.addCompactionLatencyOp(topic, System.nanoTime() - start, TimeUnit.NANOSECONDS); - if (rc != BKException.Code.OK) { - bkf.completeExceptionally(BKException.create(rc)); - } else { - bkf.complete(true); - } - }, null); + try { + ByteBuf serialized = batchMessageContainer.toByteBuf(); + outstanding.acquire(); + mxBean.addCompactionWriteOp(topic, serialized.readableBytes()); + long start = System.nanoTime(); + lh.asyncAddEntry(serialized, + (rc, ledger, eid, ctx) -> { + outstanding.release(); + mxBean.addCompactionLatencyOp(topic, System.nanoTime() - start, TimeUnit.NANOSECONDS); + if (rc != BKException.Code.OK) { + bkf.completeExceptionally(BKException.create(rc)); + } else { + bkf.complete(true); + } + }, null); - } catch (Throwable t) { - log.error("Failed to add entry", t); - batchMessageContainer.discard((Exception) t); - return FutureUtil.failedFuture(t); - } - } else { - bkf.complete(false); - } - } else { - bkf.complete(false); + } catch (Throwable t) { + log.error("Failed to add entry", t); + batchMessageContainer.discard((Exception) t); + bkf.completeExceptionally(t); + return bkf; } return bkf; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java index 821dd9c0c9d23..8455638eed4e3 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java @@ -43,6 +43,7 @@ import org.apache.pulsar.client.impl.RawBatchConverter; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.common.protocol.Markers; import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,6 +62,7 @@ public class TwoPhaseCompactor extends Compactor { private static final int MAX_OUTSTANDING = 500; protected static final String COMPACTED_TOPIC_LEDGER_PROPERTY = "CompactedTopicLedger"; private final Duration phaseOneLoopReadTimeout; + private final boolean topicCompactionRetainNullKey; public TwoPhaseCompactor(ServiceConfiguration conf, PulsarClient pulsar, @@ -68,6 +70,7 @@ public TwoPhaseCompactor(ServiceConfiguration conf, ScheduledExecutorService scheduler) { super(conf, pulsar, bk, scheduler); phaseOneLoopReadTimeout = Duration.ofSeconds(conf.getBrokerServiceCompactionPhaseOneLoopTimeInSeconds()); + topicCompactionRetainNullKey = conf.isTopicCompactionRetainNullKey(); } @Override 
@@ -126,22 +129,39 @@ private void phaseOneLoop(RawReader reader, boolean deletedMessage = false; boolean replaceMessage = false; mxBean.addCompactionReadOp(reader.getTopic(), m.getHeadersAndPayload().readableBytes()); - if (RawBatchConverter.isReadableBatch(m)) { + MessageMetadata metadata = Commands.parseMessageMetadata(m.getHeadersAndPayload()); + if (Markers.isServerOnlyMarker(metadata)) { + mxBean.addCompactionRemovedEvent(reader.getTopic()); + deletedMessage = true; + } else if (RawBatchConverter.isReadableBatch(metadata)) { try { + int numMessagesInBatch = metadata.getNumMessagesInBatch(); + int deleteCnt = 0; for (ImmutableTriple e : RawBatchConverter - .extractIdsAndKeysAndSize(m)) { + .extractIdsAndKeysAndSize(m, true)) { if (e != null) { + if (e.getMiddle() == null) { + if (!topicCompactionRetainNullKey) { + // record delete null-key message event + deleteCnt++; + mxBean.addCompactionRemovedEvent(reader.getTopic()); + } + continue; + } if (e.getRight() > 0) { MessageId old = latestForKey.put(e.getMiddle(), e.getLeft()); - replaceMessage = old != null; + if (old != null) { + mxBean.addCompactionRemovedEvent(reader.getTopic()); + } } else { - deletedMessage = true; latestForKey.remove(e.getMiddle()); + deleteCnt++; + mxBean.addCompactionRemovedEvent(reader.getTopic()); } } - if (replaceMessage || deletedMessage) { - mxBean.addCompactionRemovedEvent(reader.getTopic()); - } + } + if (deleteCnt == numMessagesInBatch) { + deletedMessage = true; } } catch (IOException ioe) { log.info("Error decoding batch for message {}. Whole batch will be included in output", @@ -157,6 +177,10 @@ private void phaseOneLoop(RawReader reader, deletedMessage = true; latestForKey.remove(keyAndSize.getLeft()); } + } else { + if (!topicCompactionRetainNullKey) { + deletedMessage = true; + } } if (replaceMessage || deletedMessage) { mxBean.addCompactionRemovedEvent(reader.getTopic()); @@ -201,7 +225,7 @@ private CompletableFuture phaseTwoSeekThenLoop(RawReader reader, MessageId reader.seekAsync(from).thenCompose((v) -> { Semaphore outstanding = new Semaphore(MAX_OUTSTANDING); CompletableFuture loopPromise = new CompletableFuture<>(); - phaseTwoLoop(reader, to, latestForKey, ledger, outstanding, loopPromise); + phaseTwoLoop(reader, to, latestForKey, ledger, outstanding, loopPromise, MessageId.earliest); return loopPromise; }).thenCompose((v) -> closeLedger(ledger)) .thenCompose((v) -> reader.acknowledgeCumulativeAsync(lastReadId, @@ -223,7 +247,8 @@ private CompletableFuture phaseTwoSeekThenLoop(RawReader reader, MessageId } private void phaseTwoLoop(RawReader reader, MessageId to, Map latestForKey, - LedgerHandle lh, Semaphore outstanding, CompletableFuture promise) { + LedgerHandle lh, Semaphore outstanding, CompletableFuture promise, + MessageId lastCompactedMessageId) { if (promise.isDone()) { return; } @@ -232,14 +257,23 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map m.close(); return; } + + if (m.getMessageId().compareTo(lastCompactedMessageId) <= 0) { + phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise, lastCompactedMessageId); + return; + } + try { MessageId id = m.getMessageId(); Optional messageToAdd = Optional.empty(); mxBean.addCompactionReadOp(reader.getTopic(), m.getHeadersAndPayload().readableBytes()); - if (RawBatchConverter.isReadableBatch(m)) { + MessageMetadata metadata = Commands.parseMessageMetadata(m.getHeadersAndPayload()); + if (Markers.isServerOnlyMarker(metadata)) { + messageToAdd = Optional.empty(); + } else if 
(RawBatchConverter.isReadableBatch(metadata)) { try { messageToAdd = RawBatchConverter.rebatchMessage( - m, (key, subid) -> subid.equals(latestForKey.get(key))); + m, (key, subid) -> subid.equals(latestForKey.get(key)), topicCompactionRetainNullKey); } catch (IOException ioe) { log.info("Error decoding batch for message {}. Whole batch will be included in output", id, ioe); @@ -248,8 +282,8 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } else { Pair keyAndSize = extractKeyAndSize(m); MessageId msg; - if (keyAndSize == null) { // pass through messages without a key - messageToAdd = Optional.of(m); + if (keyAndSize == null) { + messageToAdd = topicCompactionRetainNullKey ? Optional.of(m) : Optional.empty(); } else if ((msg = latestForKey.get(keyAndSize.getLeft())) != null && msg.equals(id)) { // consider message only if present into latestForKey map if (keyAndSize.getRight() <= 0) { @@ -272,11 +306,14 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } }); if (to.equals(id)) { + // make sure all inflight writes have finished + outstanding.acquire(MAX_OUTSTANDING); addFuture.whenComplete((res, exception2) -> { if (exception2 == null) { promise.complete(null); } }); + return; } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -299,7 +336,7 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } return; } - phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise); + phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise, m.getMessageId()); } finally { m.close(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java b/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java index d73d1d7ed6bed..63d146a3a1521 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java @@ -52,7 +52,6 @@ import org.apache.bookkeeper.clients.exceptions.NamespaceExistsException; import org.apache.bookkeeper.clients.exceptions.NamespaceNotFoundException; import org.apache.bookkeeper.common.allocator.PoolingPolicy; -import org.apache.bookkeeper.common.component.ComponentStarter; import org.apache.bookkeeper.common.component.LifecycleComponent; import org.apache.bookkeeper.common.component.LifecycleComponentStack; import org.apache.bookkeeper.common.concurrent.FutureUtils; @@ -132,7 +131,7 @@ public LocalBookkeeperEnsemble(int numberOfBookies, boolean clearOldData, String advertisedAddress) { this(numberOfBookies, zkPort, streamStoragePort, zkDataDirName, bkDataDirName, clearOldData, advertisedAddress, - new BasePortManager(bkBasePort)); + bkBasePort != 0 ? 
new BasePortManager(bkBasePort) : () -> 0); } public LocalBookkeeperEnsemble(int numberOfBookies, @@ -311,6 +310,7 @@ private void runBookies(ServerConfiguration baseConf) throws Exception { bsConfs[i] = new ServerConfiguration(baseConf); // override settings bsConfs[i].setBookiePort(bookiePort); + bsConfs[i].setBookieId("bk" + i + "test"); String zkServers = "127.0.0.1:" + zkPort; String metadataServiceUriStr = "zk://" + zkServers + "/ledgers"; @@ -455,8 +455,10 @@ public void startBK(int i) throws Exception { try { bookieComponents[i] = org.apache.bookkeeper.server.Main .buildBookieServer(new BookieConfiguration(bsConfs[i])); - ComponentStarter.startComponent(bookieComponents[i]); + bookieComponents[i].start(); } catch (BookieException.InvalidCookieException ice) { + LOG.warn("Invalid cookie found for bookie {}", i, ice); + // InvalidCookieException can happen if the machine IP has changed // Since we are running here a local bookie that is always accessed // from localhost, we can ignore the error @@ -473,7 +475,7 @@ public void startBK(int i) throws Exception { bookieComponents[i] = org.apache.bookkeeper.server.Main .buildBookieServer(new BookieConfiguration(bsConfs[i])); - ComponentStarter.startComponent(bookieComponents[i]); + bookieComponents[i].start(); } diff --git a/pulsar-broker/src/main/proto/DelayedMessageIndexBucketSnapshotFormat.proto b/pulsar-broker/src/main/proto/DelayedMessageIndexBucketMetadata.proto similarity index 85% rename from pulsar-broker/src/main/proto/DelayedMessageIndexBucketSnapshotFormat.proto rename to pulsar-broker/src/main/proto/DelayedMessageIndexBucketMetadata.proto index 6996b860c5249..01b770c567d09 100644 --- a/pulsar-broker/src/main/proto/DelayedMessageIndexBucketSnapshotFormat.proto +++ b/pulsar-broker/src/main/proto/DelayedMessageIndexBucketMetadata.proto @@ -21,12 +21,7 @@ syntax = "proto2"; package pulsar.delay; option java_package = "org.apache.pulsar.broker.delayed.proto"; option optimize_for = SPEED; - -message DelayedIndex { - required uint64 timestamp = 1; - required uint64 ledger_id = 2; - required uint64 entry_id = 3; -} +option java_multiple_files = true; message SnapshotSegmentMetadata { map delayed_index_bit_map = 1; @@ -34,10 +29,6 @@ message SnapshotSegmentMetadata { required uint64 min_schedule_timestamp = 3; } -message SnapshotSegment { - repeated DelayedIndex indexes = 1; -} - message SnapshotMetadata { repeated SnapshotSegmentMetadata metadata_list = 1; } diff --git a/pulsar-broker/src/main/proto/DelayedMessageIndexBucketSegment.proto b/pulsar-broker/src/main/proto/DelayedMessageIndexBucketSegment.proto new file mode 100644 index 0000000000000..a6ed30cfe8cd4 --- /dev/null +++ b/pulsar-broker/src/main/proto/DelayedMessageIndexBucketSegment.proto @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +syntax = "proto2"; + +package pulsar.delay; +option java_package = "org.apache.pulsar.broker.delayed.proto"; +option optimize_for = SPEED; +option java_multiple_files = true; + +message DelayedIndex { + required uint64 timestamp = 1; + required uint64 ledger_id = 2; + required uint64 entry_id = 3; +} + +message SnapshotSegment { + repeated DelayedIndex indexes = 1; +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/RoaringbitmapTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/RoaringbitmapTest.java new file mode 100644 index 0000000000000..477e7413ef991 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/RoaringbitmapTest.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar; + +import static org.testng.Assert.assertTrue; +import org.roaringbitmap.RoaringBitmap; +import org.testng.annotations.Test; + +public class RoaringbitmapTest { + + @Test + public void testRoaringBitmapContains() { + RoaringBitmap roaringBitmap = new RoaringBitmap(); + for (long i = 1; i <= 100_000; i++) { + roaringBitmap.add(i, i + 1); + } + + for (long i = 1; i <= 100_000; i++) { + assertTrue(roaringBitmap.contains(i, i + 1)); + } + + RoaringBitmap roaringBitmap2 = new RoaringBitmap(); + for (long i = 1; i <= 1000_000; i++) { + roaringBitmap2.add(i, i + 1); + } + + for (long i = 1; i <= 1000_000; i++) { + assertTrue(roaringBitmap2.contains(i, i + 1)); + } + + RoaringBitmap roaringBitmap3 = new RoaringBitmap(); + for (long i = 1; i <= 10_000_000; i++) { + roaringBitmap3.add(i, i + 1); + } + + for (long i = 1; i <= 10_000_000; i++) { + assertTrue(roaringBitmap3.contains(i, i + 1)); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java index a02689dc9763a..9ff18baf95912 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java @@ -41,6 +41,7 @@ import org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.zookeeper.ZkIsolatedBookieEnsemblePlacementPolicy; import org.testng.annotations.Test; /** @@ -152,6 +153,24 @@ public void testSetDefaultEnsemblePlacementPolicyRackAwareEnabledChangedValues() assertEquals(20, bkConf.getMinNumRacksPerWriteQuorum()); } + @Test + public void testSetEnsemblePlacementPolicys() { + ClientConfiguration 
bkConf = new ClientConfiguration(); + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setBookkeeperClientMinNumRacksPerWriteQuorum(3); + conf.setBookkeeperClientEnforceMinNumRacksPerWriteQuorum(true); + + MetadataStore store = mock(MetadataStore.class); + + BookKeeperClientFactoryImpl.setEnsemblePlacementPolicy( + bkConf, + conf, + store, + ZkIsolatedBookieEnsemblePlacementPolicy.class); + assertEquals(bkConf.getMinNumRacksPerWriteQuorum(), 3); + assertTrue(bkConf.getEnforceMinNumRacksPerWriteQuorum()); + } + @Test public void testSetDiskWeightBasedPlacementEnabled() { BookKeeperClientFactoryImpl factory = new BookKeeperClientFactoryImpl(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/LedgerLostAndSkipNonRecoverableTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/LedgerLostAndSkipNonRecoverableTest.java new file mode 100644 index 0000000000000..389af8f2cd9f9 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/LedgerLostAndSkipNonRecoverableTest.java @@ -0,0 +1,296 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.common.util.FutureUtil; +import org.testcontainers.shaded.org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class LedgerLostAndSkipNonRecoverableTest extends ProducerConsumerBase { + + private static final String DEFAULT_NAMESPACE = "my-property/my-ns"; + + @BeforeClass + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + protected void doInitConf() throws Exception { + conf.setAutoSkipNonRecoverableData(true); + } + + @DataProvider(name = "batchEnabled") + public Object[][] batchEnabled(){ + return new Object[][]{ + {true}, + {false} + }; + } + + @Test(timeOut = 30000, dataProvider = "batchEnabled") + public void testMarkDeletedPositionCanForwardAfterTopicLedgerLost(boolean enabledBatch) throws Exception { + String topicSimpleName = UUID.randomUUID().toString().replaceAll("-", ""); + String subName = UUID.randomUUID().toString().replaceAll("-", ""); + String topicName = String.format("persistent://%s/%s", DEFAULT_NAMESPACE, topicSimpleName); + + log.info("create topic and subscription."); + Consumer sub = createConsumer(topicName, subName, enabledBatch); + sub.redeliverUnacknowledgedMessages(); + sub.close(); + + log.info("send many messages."); + int ledgerCount = 3; + int messageCountPerLedger = enabledBatch ? 25 : 5; + int messageCountPerEntry = enabledBatch ? 
5 : 1; + List[] sendMessages = + sendManyMessages(topicName, ledgerCount, messageCountPerLedger, messageCountPerEntry); + int sendMessageCount = Arrays.asList(sendMessages).stream() + .flatMap(s -> s.stream()).collect(Collectors.toList()).size(); + log.info("send {} messages", sendMessageCount); + + log.info("make individual ack."); + ConsumerAndReceivedMessages consumerAndReceivedMessages1 = + waitConsumeAndAllMessages(topicName, subName, enabledBatch,false); + List[] messageIds = consumerAndReceivedMessages1.messageIds; + Consumer consumer = consumerAndReceivedMessages1.consumer; + MessageIdImpl individualPosition = messageIds[1].get(messageCountPerEntry - 1); + MessageIdImpl expectedMarkDeletedPosition = + new MessageIdImpl(messageIds[0].get(0).getLedgerId(), messageIds[0].get(0).getEntryId(), -1); + MessageIdImpl lastPosition = + new MessageIdImpl(messageIds[2].get(4).getLedgerId(), messageIds[2].get(4).getEntryId(), -1); + consumer.acknowledge(individualPosition); + consumer.acknowledge(expectedMarkDeletedPosition); + waitPersistentCursorLedger(topicName, subName, expectedMarkDeletedPosition.getLedgerId(), + expectedMarkDeletedPosition.getEntryId()); + consumer.close(); + + log.info("Make lost ledger [{}].", individualPosition.getLedgerId()); + pulsar.getBrokerService().getTopic(topicName, false).get().get().close(false); + pulsarTestContext.getMockBookKeeper().deleteLedger(individualPosition.getLedgerId()); + + log.info("send some messages."); + sendManyMessages(topicName, 3, messageCountPerEntry); + + log.info("receive all messages then verify mark deleted position"); + ConsumerAndReceivedMessages consumerAndReceivedMessages2 = + waitConsumeAndAllMessages(topicName, subName, enabledBatch, true); + waitMarkDeleteLargeAndEquals(topicName, subName, lastPosition.getLedgerId(), lastPosition.getEntryId()); + + // cleanup + consumerAndReceivedMessages2.consumer.close(); + admin.topics().delete(topicName); + } + + private ManagedCursorImpl getCursor(String topicName, String subName) throws Exception { + PersistentSubscription subscription_ = + (PersistentSubscription) pulsar.getBrokerService().getTopic(topicName, false) + .get().get().getSubscription(subName); + return (ManagedCursorImpl) subscription_.getCursor(); + } + + private void waitMarkDeleteLargeAndEquals(String topicName, String subName, final long markDeletedLedgerId, + final long markDeletedEntryId) throws Exception { + Awaitility.await().atMost(Duration.ofSeconds(45)).untilAsserted(() -> { + Position persistentMarkDeletedPosition = getCursor(topicName, subName).getMarkDeletedPosition(); + log.info("markDeletedPosition {}:{}, expected {}:{}", persistentMarkDeletedPosition.getLedgerId(), + persistentMarkDeletedPosition.getEntryId(), markDeletedLedgerId, markDeletedEntryId); + Assert.assertTrue(persistentMarkDeletedPosition.getLedgerId() >= markDeletedLedgerId); + if (persistentMarkDeletedPosition.getLedgerId() > markDeletedLedgerId){ + return; + } + Assert.assertTrue(persistentMarkDeletedPosition.getEntryId() >= markDeletedEntryId); + }); + } + + private void waitPersistentCursorLedger(String topicName, String subName, final long markDeletedLedgerId, + final long markDeletedEntryId) throws Exception { + Awaitility.await().untilAsserted(() -> { + Position persistentMarkDeletedPosition = getCursor(topicName, subName).getPersistentMarkDeletedPosition(); + Assert.assertEquals(persistentMarkDeletedPosition.getLedgerId(), markDeletedLedgerId); + Assert.assertEquals(persistentMarkDeletedPosition.getEntryId(), markDeletedEntryId); + }); 
+ } + + private List[] sendManyMessages(String topicName, int ledgerCount, int messageCountPerLedger, + int messageCountPerEntry) throws Exception { + List[] messageIds = new List[ledgerCount]; + for (int i = 0; i < ledgerCount; i++){ + admin.topics().unload(topicName); + if (messageCountPerEntry == 1) { + messageIds[i] = sendManyMessages(topicName, messageCountPerLedger); + } else { + messageIds[i] = sendManyBatchedMessages(topicName, messageCountPerEntry, + messageCountPerLedger / messageCountPerEntry); + } + } + return messageIds; + } + + private List sendManyMessages(String topicName, int messageCountPerLedger, + int messageCountPerEntry) throws Exception { + if (messageCountPerEntry == 1) { + return sendManyMessages(topicName, messageCountPerLedger); + } else { + return sendManyBatchedMessages(topicName, messageCountPerEntry, + messageCountPerLedger / messageCountPerEntry); + } + } + + private List sendManyMessages(String topicName, int msgCount) throws Exception { + List messageIdList = new ArrayList<>(); + final Producer producer = pulsarClient.newProducer(Schema.JSON(String.class)) + .topic(topicName) + .enableBatching(false) + .create(); + long timestamp = System.currentTimeMillis(); + for (int i = 0; i < msgCount; i++){ + String messageSuffix = String.format("%s-%s", timestamp, i); + MessageIdImpl messageIdSent = (MessageIdImpl) producer.newMessage() + .key(String.format("Key-%s", messageSuffix)) + .value(String.format("Msg-%s", messageSuffix)) + .send(); + messageIdList.add(messageIdSent); + } + producer.close(); + return messageIdList; + } + + private List sendManyBatchedMessages(String topicName, int msgCountPerEntry, int entryCount) + throws Exception { + Producer producer = pulsarClient.newProducer(Schema.JSON(String.class)) + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(Integer.MAX_VALUE, TimeUnit.SECONDS) + .batchingMaxMessages(Integer.MAX_VALUE) + .create(); + List> messageIdFutures = new ArrayList<>(); + for (int i = 0; i < entryCount; i++){ + for (int j = 0; j < msgCountPerEntry; j++){ + CompletableFuture messageIdFuture = + producer.newMessage().value(String.format("entry-seq[%s], batch_index[%s]", i, j)).sendAsync(); + messageIdFutures.add(messageIdFuture); + } + producer.flush(); + } + producer.close(); + FutureUtil.waitForAll(messageIdFutures).get(); + return messageIdFutures.stream().map(f -> (MessageIdImpl)f.join()).collect(Collectors.toList()); + } + + private ConsumerAndReceivedMessages waitConsumeAndAllMessages(String topicName, String subName, + final boolean enabledBatch, + boolean ack) throws Exception { + List messageIds = new ArrayList<>(); + final Consumer consumer = createConsumer(topicName, subName, enabledBatch); + while (true){ + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message != null){ + messageIds.add((MessageIdImpl) message.getMessageId()); + if (ack) { + consumer.acknowledge(message); + } + } else { + break; + } + } + log.info("receive {} messages", messageIds.size()); + return new ConsumerAndReceivedMessages(consumer, sortMessageId(messageIds, enabledBatch)); + } + + @AllArgsConstructor + private static class ConsumerAndReceivedMessages { + private Consumer consumer; + private List[] messageIds; + } + + private List[] sortMessageId(List messageIds, boolean enabledBatch){ + Map> map = messageIds.stream().collect(Collectors.groupingBy(v -> v.getLedgerId())); + TreeMap> sortedMap = new TreeMap<>(map); + List[] res = new List[sortedMap.size()]; + Iterator>> iterator = sortedMap.entrySet().iterator(); + 
for (int i = 0; i < sortedMap.size(); i++){ + res[i] = iterator.next().getValue(); + } + for (List list : res){ + list.sort((m1, m2) -> { + if (enabledBatch){ + BatchMessageIdImpl mb1 = (BatchMessageIdImpl) m1; + BatchMessageIdImpl mb2 = (BatchMessageIdImpl) m2; + return (int) (mb1.getLedgerId() * 1000000 + mb1.getEntryId() * 1000 + mb1.getBatchIndex() - + mb2.getLedgerId() * 1000000 + mb2.getEntryId() * 1000 + mb2.getBatchIndex()); + } + return (int) (m1.getLedgerId() * 1000 + m1.getEntryId() - + m2.getLedgerId() * 1000 + m2.getEntryId()); + }); + } + return res; + } + + private Consumer createConsumer(String topicName, String subName, boolean enabledBatch) throws Exception { + final Consumer consumer = pulsarClient.newConsumer(Schema.JSON(String.class)) + .autoScaledReceiverQueueSizeEnabled(false) + .subscriptionType(SubscriptionType.Failover) + .isAckReceiptEnabled(true) + .enableBatchIndexAcknowledgment(enabledBatch) + .receiverQueueSize(1000) + .topic(topicName) + .subscriptionName(subName) + .subscribe(); + return consumer; + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java index 37a7310ae17ca..3e0887646e119 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java @@ -54,6 +54,8 @@ protected void cleanup() throws Exception { @Override protected void doInitConf() throws Exception { super.doInitConf(); + conf.setBrokerServicePortTls(Optional.of(0)); + conf.setWebServicePortTls(Optional.of(0)); if (useStaticPorts) { conf.setBrokerServicePortTls(Optional.of(6651)); conf.setBrokerServicePort(Optional.of(6660)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java index 47949d7312b88..4a6524bf24521 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java @@ -102,9 +102,9 @@ void setup() throws Exception { createTenant(pulsarAdmins[BROKER_COUNT - 1]); for (int i = 0; i < BROKER_COUNT; i++) { - String topic = String.format("%s/%s/%s:%s", NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", - pulsarServices[i].getAdvertisedAddress(), brokerWebServicePorts[i]); - pulsarAdmins[0].namespaces().createNamespace(topic); + var namespaceName = NamespaceService.getSLAMonitorNamespace(pulsarServices[i].getBrokerId(), + pulsarServices[i].getConfig()); + pulsarAdmins[0].namespaces().createNamespace(namespaceName.toString()); } } @@ -173,9 +173,9 @@ public void testOwnedNamespaces() { public void testOwnershipViaAdminAfterSetup() { for (int i = 0; i < BROKER_COUNT; i++) { try { - String topic = String.format("persistent://%s/%s/%s:%s/%s", - NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", pulsarServices[i].getAdvertisedAddress(), - brokerWebServicePorts[i], "my-topic"); + String topic = String.format("persistent://%s/%s/%s/%s", + NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", + pulsarServices[i].getBrokerId(), "my-topic"); assertEquals(pulsarAdmins[0].lookups().lookupTopic(topic), "pulsar://" + pulsarServices[i].getAdvertisedAddress() + ":" + brokerNativeBrokerPorts[i]); } catch (Exception e) { @@ -199,8 +199,8 @@ public void testUnloadIfBrokerCrashes() { fail("Should be a able to close the broker index " + crashIndex + " Exception: 
" + e); } - String topic = String.format("persistent://%s/%s/%s:%s/%s", NamespaceService.SLA_NAMESPACE_PROPERTY, - "my-cluster", pulsarServices[crashIndex].getAdvertisedAddress(), brokerWebServicePorts[crashIndex], + String topic = String.format("persistent://%s/%s/%s/%s", NamespaceService.SLA_NAMESPACE_PROPERTY, + "my-cluster", pulsarServices[crashIndex].getBrokerId(), "my-topic"); log.info("Lookup for namespace {}", topic); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java index e6459bbf74c31..ceb3c1d0d9335 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java @@ -126,7 +126,7 @@ public void testEvents(String topicTypePersistence, String topicTypePartitioned, boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); events.clear(); if (topicTypePartitioned.equals("partitioned")) { @@ -150,7 +150,7 @@ public void testEventsWithUnload(String topicTypePersistence, String topicTypePa boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); events.clear(); admin.topics().unload(topicName); @@ -182,7 +182,7 @@ public void testEventsActiveSub(String topicTypePersistence, String topicTypePar boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("sub").subscribe(); Producer producer = pulsarClient.newProducer().topic(topicName).create(); @@ -238,7 +238,7 @@ public void testEventsActiveSub(String topicTypePersistence, String topicTypePar public void testTopicAutoGC(String topicTypePersistence, String topicTypePartitioned) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); admin.namespaces().setInactiveTopicPolicies(namespace, new InactiveTopicPolicies(InactiveTopicDeleteMode.delete_when_no_subscriptions, 1, true)); @@ -262,25 +262,21 @@ public void testTopicAutoGC(String topicTypePersistence, String topicTypePartiti ); } - private void createTopicAndVerifyEvents(String topicTypePartitioned, String topicName) throws Exception { + private void createTopicAndVerifyEvents(String topicDomain, String topicTypePartitioned, String topicName) throws Exception { final String[] expectedEvents; - if (topicTypePartitioned.equals("partitioned")) { - topicNameToWatch = topicName + "-partition-1"; - admin.topics().createPartitionedTopic(topicName, 2); - triggerPartitionsCreation(topicName); - + if (topicDomain.equalsIgnoreCase("persistent") || topicTypePartitioned.equals("partitioned")) { 
expectedEvents = new String[]{ "LOAD__BEFORE", "CREATE__BEFORE", "CREATE__SUCCESS", "LOAD__SUCCESS" }; - } else { - topicNameToWatch = topicName; - admin.topics().createNonPartitionedTopic(topicName); - expectedEvents = new String[]{ + // Before https://github.com/apache/pulsar/pull/21995, Pulsar would skip creating the topic if it + // already existed, and the "check topic exists" action would try to load the managed ledger, + // which triggered two extra events: [LOAD__BEFORE, LOAD__FAILURE]. + // #21995 fixed this incorrect behavior, so these two events are removed. "LOAD__BEFORE", "LOAD__FAILURE", "LOAD__BEFORE", @@ -288,7 +284,14 @@ private void createTopicAndVerifyEvents(String topicTypePartitioned, String topi "CREATE__SUCCESS", "LOAD__SUCCESS" }; - + } + if (topicTypePartitioned.equals("partitioned")) { + topicNameToWatch = topicName + "-partition-1"; + admin.topics().createPartitionedTopic(topicName, 2); + triggerPartitionsCreation(topicName); + } else { + topicNameToWatch = topicName; + admin.topics().createNonPartitionedTopic(topicName); } Awaitility.waitAtMost(10, TimeUnit.SECONDS).untilAsserted(() -> diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java index fb4f880efff07..fe3c8b591ed6c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.broker.admin; +import static java.util.concurrent.TimeUnit.MINUTES; import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.apache.pulsar.broker.BrokerTestUtil.newUniqueName; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -35,6 +37,7 @@ import java.lang.reflect.Field; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.time.Clock; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -64,6 +67,7 @@ import org.apache.pulsar.broker.admin.AdminApiTest.MockedPulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerWrapper; import org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; @@ -127,6 +131,7 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -135,6 +140,10 @@ public class AdminApi2Test extends MockedPulsarServiceBaseTest { private MockedPulsarService mockPulsarSetup; + private boolean restartClusterAfterTest; + private int usageCount; + private String defaultNamespace; + private String defaultTenant; @BeforeClass @Override @@ -149,13 +158,50 @@ public void setup() throws Exception { setupClusters(); } + @Test + public void testExceptionOfMaxTopicsPerNamespaceCanBeHanle() throws Exception { + super.internalCleanup(); + conf.setMaxTopicsPerNamespace(3); + super.internalSetup(); + String topic = "persistent://testTenant/ns1/test_create_topic_v"; + TenantInfoImpl tenantInfo = new
TenantInfoImpl(Sets.newHashSet("role1", "role2"), + Sets.newHashSet("test")); + // check producer/consumer auto create non-partitioned topic + conf.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); + admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + admin.tenants().createTenant("testTenant", tenantInfo); + admin.namespaces().createNamespace("testTenant/ns1", Sets.newHashSet("test")); + + pulsarClient.newProducer().topic(topic + "1").create().close(); + pulsarClient.newProducer().topic(topic + "2").create().close(); + pulsarClient.newConsumer().topic(topic + "3").subscriptionName("test_sub").subscribe().close(); + try { + pulsarClient.newConsumer().topic(topic + "4").subscriptionName("test_sub") + .subscribeAsync().get(5, TimeUnit.SECONDS); + Assert.fail(); + } catch (Exception e) { + assertTrue(e.getCause() instanceof PulsarClientException.NotAllowedException); + } + + // reset configuration + conf.setMaxTopicsPerNamespace(0); + conf.setDefaultNumPartitions(1); + } + @Override protected ServiceConfiguration getDefaultConf() { ServiceConfiguration conf = super.getDefaultConf(); + configureDefaults(conf); + return conf; + } + + void configureDefaults(ServiceConfiguration conf) { conf.setForceDeleteNamespaceAllowed(true); conf.setLoadBalancerEnabled(true); conf.setEnableNamespaceIsolationUpdateOnTime(true); - return conf; + conf.setAllowOverrideEntryFilters(true); + conf.setEntryFilterNames(List.of()); + conf.setMaxNumPartitionsPerPartitionedTopic(0); } @AfterClass(alwaysRun = true) @@ -171,6 +217,20 @@ public void cleanup() throws Exception { @AfterMethod(alwaysRun = true) public void resetClusters() throws Exception { + if (restartClusterAfterTest) { + restartClusterAndResetUsageCount(); + } else { + try { + cleanupCluster(); + } catch (Exception e) { + log.error("Failed to clean up state by deleting namespaces and tenants after test. 
" + + "Restarting the test broker.", e); + restartClusterAndResetUsageCount(); + } + } + } + + private void cleanupCluster() throws Exception { pulsar.getConfiguration().setForceDeleteTenantAllowed(true); pulsar.getConfiguration().setForceDeleteNamespaceAllowed(true); for (String tenant : admin.tenants().getTenants()) { @@ -178,22 +238,59 @@ public void resetClusters() throws Exception { deleteNamespaceWithRetry(namespace, true, admin, pulsar, mockPulsarSetup.getPulsar()); } - admin.tenants().deleteTenant(tenant, true); + try { + admin.tenants().deleteTenant(tenant, true); + } catch (Exception e) { + log.error("Failed to delete tenant {} after test", tenant, e); + String zkDirectory = "/managed-ledgers/" + tenant; + try { + log.info("Listing {} to see if existing keys are preventing deletion.", zkDirectory); + pulsar.getPulsarResources().getLocalMetadataStore().get().getChildren(zkDirectory) + .get(5, TimeUnit.SECONDS).forEach(key -> log.info("Child key '{}'", key)); + } catch (Exception ignore) { + log.error("Failed to list tenant {} ZK directory {} after test", tenant, zkDirectory, e); + } + throw e; + } } for (String cluster : admin.clusters().getClusters()) { admin.clusters().deleteCluster(cluster); } - resetConfig(); + configureDefaults(conf); setupClusters(); } + private void restartClusterAfterTest() { + restartClusterAfterTest = true; + } + + private void restartClusterAndResetUsageCount() throws Exception { + cleanup(); + restartClusterAfterTest = false; + usageCount = 0; + setup(); + } + + private void restartClusterIfReused() throws Exception { + if (usageCount > 1) { + restartClusterAndResetUsageCount(); + } + } + + @BeforeMethod + public void increaseUsageCount() { + usageCount++; + } + private void setupClusters() throws PulsarAdminException { admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); - admin.tenants().createTenant("prop-xyz", tenantInfo); - admin.namespaces().createNamespace("prop-xyz/ns1", Set.of("test")); + defaultTenant = newUniqueName("prop-xyz"); + admin.tenants().createTenant(defaultTenant, tenantInfo); + defaultNamespace = defaultTenant + "/ns1"; + admin.namespaces().createNamespace(defaultNamespace, Set.of("test")); } @DataProvider(name = "topicType") @@ -250,7 +347,7 @@ public void testIncrementPartitionsOfTopic() throws Exception { final String subName2 = topicName + "-my-sub-2/encode"; final int startPartitions = 4; final int newPartitions = 8; - final String partitionedTopicName = "persistent://prop-xyz/ns1/" + topicName; + final String partitionedTopicName = "persistent://" + defaultNamespace + "/" + topicName; URL pulsarUrl = new URL(pulsar.getWebServiceAddress()); @@ -299,7 +396,7 @@ public void testIncrementPartitionsOfTopic() throws Exception { assertEquals(new HashSet<>(admin.topics().getSubscriptions(newPartitionTopicName)), Set.of(subName1, subName2)); - assertEquals(new HashSet<>(admin.topics().getList("prop-xyz/ns1")).size(), newPartitions); + assertEquals(new HashSet<>(admin.topics().getList(defaultNamespace)).size(), newPartitions); // test cumulative stats for partitioned topic PartitionedTopicStats topicStats = admin.topics().getPartitionedStats(partitionedTopicName, false); @@ -333,27 +430,22 @@ public void testIncrementPartitionsOfTopic() throws Exception { @Test public void testTopicPoliciesWithMultiBroker() throws Exception { + restartClusterAfterTest(); + //setup cluster with 3 broker - 
cleanup(); - setup(); admin.clusters().updateCluster("test", ClusterData.builder().serviceUrl((pulsar.getWebServiceAddress() + ",localhost:1026," + "localhost:2050")).build()); TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); - admin.tenants().createTenant("prop-xyz2", tenantInfo); - admin.namespaces().createNamespace("prop-xyz2/ns1", Set.of("test")); - conf.setBrokerServicePort(Optional.of(1024)); - conf.setBrokerServicePortTls(Optional.of(1025)); - conf.setWebServicePort(Optional.of(1026)); - conf.setWebServicePortTls(Optional.of(1027)); + String tenantName = newUniqueName("prop-xyz2"); + admin.tenants().createTenant(tenantName, tenantInfo); + admin.namespaces().createNamespace(tenantName + "/ns1", Set.of("test")); + ServiceConfiguration config2 = super.getDefaultConf(); @Cleanup - PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(conf); + PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(config2); PulsarService pulsar2 = pulsarTestContext2.getPulsarService(); - conf.setBrokerServicePort(Optional.of(2048)); - conf.setBrokerServicePortTls(Optional.of(2049)); - conf.setWebServicePort(Optional.of(2050)); - conf.setWebServicePortTls(Optional.of(2051)); + ServiceConfiguration config3 = super.getDefaultConf(); @Cleanup - PulsarTestContext pulsarTestContext3 = createAdditionalPulsarTestContext(conf); + PulsarTestContext pulsarTestContext3 = createAdditionalPulsarTestContext(config3); PulsarService pulsar3 = pulsarTestContext.getPulsarService(); @Cleanup PulsarAdmin admin2 = PulsarAdmin.builder().serviceHttpUrl(pulsar2.getWebServiceAddress()).build(); @@ -361,14 +453,14 @@ public void testTopicPoliciesWithMultiBroker() throws Exception { PulsarAdmin admin3 = PulsarAdmin.builder().serviceHttpUrl(pulsar3.getWebServiceAddress()).build(); //for partitioned topic, we can get topic policies from every broker - final String topic = "persistent://prop-xyz2/ns1/" + BrokerTestUtil.newUniqueName("test"); + final String topic = "persistent://" + tenantName + "/ns1/" + newUniqueName("test"); int partitionNum = 3; admin.topics().createPartitionedTopic(topic, partitionNum); pulsarClient.newConsumer().topic(topic).subscriptionName("sub").subscribe().close(); setTopicPoliciesAndValidate(admin2, admin3, topic); //for non-partitioned topic, we can get topic policies from every broker - final String topic2 = "persistent://prop-xyz2/ns1/" + BrokerTestUtil.newUniqueName("test"); + final String topic2 = "persistent://" + tenantName + "/ns1/" + newUniqueName("test"); pulsarClient.newConsumer().topic(topic2).subscriptionName("sub").subscribe().close(); setTopicPoliciesAndValidate(admin2, admin3, topic2); } @@ -402,7 +494,7 @@ private void setTopicPoliciesAndValidate(PulsarAdmin admin2 public void nonPersistentTopics() throws Exception { final String topicName = "nonPersistentTopic"; - final String nonPersistentTopicName = "non-persistent://prop-xyz/ns1/" + topicName; + final String nonPersistentTopicName = "non-persistent://" + defaultNamespace + "/" + topicName; // Force to create a topic publishMessagesOnTopic(nonPersistentTopicName, 0, 0); @@ -423,8 +515,7 @@ public void nonPersistentTopics() throws Exception { assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgDropRate(), 0); assertEquals(topicStats.getPublishers().size(), 0); assertEquals(topicStats.getMsgDropRate(), 0); - assertEquals(topicStats.getOwnerBroker(), - pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); + 
assertEquals(topicStats.getOwnerBroker(), pulsar.getBrokerId()); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(nonPersistentTopicName, false); assertEquals(internalStats.cursors.keySet(), Set.of("my-sub")); @@ -434,7 +525,7 @@ public void nonPersistentTopics() throws Exception { assertTrue(topicStats.getSubscriptions().containsKey("my-sub")); assertEquals(topicStats.getPublishers().size(), 0); // test partitioned-topic - final String partitionedTopicName = "non-persistent://prop-xyz/ns1/paritioned"; + final String partitionedTopicName = "non-persistent://" + defaultNamespace + "/paritioned"; admin.topics().createPartitionedTopic(partitionedTopicName, 5); assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, 5); } @@ -462,7 +553,7 @@ private void publishMessagesOnTopic(String topicName, int messages, int startIdx @Test public void testSetPersistencePolicies() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); admin.namespaces().createNamespace(namespace, Set.of("test")); assertNull(admin.namespaces().getPersistence(namespace)); @@ -500,7 +591,7 @@ public void testSetPersistencePolicies() throws Exception { @Test public void testUpdatePersistencePolicyUpdateManagedCursor() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = "persistent://" + namespace + "/topic1"; admin.namespaces().createNamespace(namespace, Set.of("test")); @@ -541,7 +632,7 @@ public void testUpdatePersistencePolicyUpdateManagedCursor() throws Exception { @Test(dataProvider = "topicType") public void testUnloadTopic(final String topicType) throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = topicType + "://" + namespace + "/topic1"; admin.namespaces().createNamespace(namespace, Set.of("test")); @@ -591,11 +682,12 @@ private void unloadTopic(String topicName) throws Exception { */ @Test(dataProvider = "namespaceNames", timeOut = 30000) public void testResetCursorOnPosition(String namespaceName) throws Exception { - final String topicName = "persistent://prop-xyz/use/" + namespaceName + "/resetPosition"; + restartClusterAfterTest(); + final String topicName = "persistent://" + defaultTenant + "/use/" + namespaceName + "/resetPosition"; final int totalProducedMessages = 50; // set retention - admin.namespaces().setRetention("prop-xyz/ns1", new RetentionPolicies(10, 10)); + admin.namespaces().setRetention(defaultNamespace, new RetentionPolicies(10, 10)); // create consumer and subscription Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub") @@ -674,14 +766,11 @@ public void testResetCursorOnPosition(String namespaceName) throws Exception { } consumer.close(); - - cleanup(); - setup(); } @Test public void shouldNotSupportResetOnPartitionedTopic() throws PulsarAdminException, PulsarClientException { - final String partitionedTopicName = "persistent://prop-xyz/ns1/" + BrokerTestUtil.newUniqueName("parttopic"); + final String partitionedTopicName = "persistent://" + defaultNamespace + "/" + newUniqueName("parttopic"); admin.topics().createPartitionedTopic(partitionedTopicName, 4); @Cleanup Consumer consumer = pulsarClient.newConsumer().topic(partitionedTopicName).subscriptionName("my-sub") @@ -713,7 +802,9 @@ private void 
publishMessagesOnPersistentTopic(String topicName, int messages, in @Test(timeOut = 20000) public void testMaxConsumersOnSubApi() throws Exception { - final String namespace = "prop-xyz/ns1"; + final String namespace = newUniqueName(defaultTenant + "/ns"); + admin.namespaces().createNamespace(namespace, Set.of("test")); + assertNull(admin.namespaces().getMaxConsumersPerSubscription(namespace)); admin.namespaces().setMaxConsumersPerSubscription(namespace, 10); Awaitility.await().untilAsserted(() -> { @@ -732,7 +823,7 @@ public void testMaxConsumersOnSubApi() throws Exception { */ @Test public void testLoadReportApi() throws Exception { - + restartClusterAfterTest(); this.conf.setLoadManagerClassName(SimpleLoadManagerImpl.class.getName()); @Cleanup("cleanup") MockedPulsarService mockPulsarSetup1 = new MockedPulsarService(this.conf); @@ -795,6 +886,8 @@ public void testPeerCluster() throws Exception { */ @Test public void testReplicationPeerCluster() throws Exception { + restartClusterAfterTest(); + admin.clusters().createCluster("us-west1", ClusterData.builder().serviceUrl("http://broker.messaging.west1.example.com:8080").build()); admin.clusters().createCluster("us-west2", @@ -814,7 +907,7 @@ public void testReplicationPeerCluster() throws Exception { assertEquals(allClusters, List.of("test", "us-east1", "us-east2", "us-west1", "us-west2", "us-west3", "us-west4")); - final String property = "peer-prop"; + final String property = newUniqueName("peer-prop"); Set allowedClusters = Set.of("us-west1", "us-west2", "us-west3", "us-west4", "us-east1", "us-east2", "global"); TenantInfoImpl propConfig = new TenantInfoImpl(Set.of("test"), allowedClusters); @@ -848,9 +941,6 @@ public void testReplicationPeerCluster() throws Exception { clusterIds = Set.of("us-west1", "us-west4"); // no peer coexist in replication clusters admin.namespaces().setNamespaceReplicationClusters(namespace, clusterIds); - - cleanup(); - setup(); } @Test @@ -889,7 +979,7 @@ public void clusterFailureDomain() throws PulsarAdminException { @Test public void namespaceAntiAffinity() throws PulsarAdminException { - final String namespace = "prop-xyz/ns1"; + final String namespace = defaultNamespace; final String antiAffinityGroup = "group"; assertTrue(isBlank(admin.namespaces().getNamespaceAntiAffinityGroup(namespace))); admin.namespaces().setNamespaceAntiAffinityGroup(namespace, antiAffinityGroup); @@ -897,9 +987,9 @@ public void namespaceAntiAffinity() throws PulsarAdminException { admin.namespaces().deleteNamespaceAntiAffinityGroup(namespace); assertTrue(isBlank(admin.namespaces().getNamespaceAntiAffinityGroup(namespace))); - final String ns1 = "prop-xyz/antiAG1"; - final String ns2 = "prop-xyz/antiAG2"; - final String ns3 = "prop-xyz/antiAG3"; + final String ns1 = defaultTenant + "/antiAG1"; + final String ns2 = defaultTenant + "/antiAG2"; + final String ns3 = defaultTenant + "/antiAG3"; admin.namespaces().createNamespace(ns1, Set.of("test")); admin.namespaces().createNamespace(ns2, Set.of("test")); admin.namespaces().createNamespace(ns3, Set.of("test")); @@ -908,19 +998,19 @@ public void namespaceAntiAffinity() throws PulsarAdminException { admin.namespaces().setNamespaceAntiAffinityGroup(ns3, antiAffinityGroup); Set namespaces = new HashSet<>( - admin.namespaces().getAntiAffinityNamespaces("prop-xyz", "test", antiAffinityGroup)); + admin.namespaces().getAntiAffinityNamespaces(defaultTenant, "test", antiAffinityGroup)); assertEquals(namespaces.size(), 3); assertTrue(namespaces.contains(ns1)); 
assertTrue(namespaces.contains(ns2)); assertTrue(namespaces.contains(ns3)); - List namespaces2 = admin.namespaces().getAntiAffinityNamespaces("prop-xyz", "test", "invalid-group"); + List namespaces2 = admin.namespaces().getAntiAffinityNamespaces(defaultTenant, "test", "invalid-group"); assertEquals(namespaces2.size(), 0); } @Test public void testPersistentTopicList() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = "non-persistent://" + namespace + "/bundle-topic"; admin.namespaces().createNamespace(namespace, 20); admin.namespaces().setNamespaceReplicationClusters(namespace, Set.of("test")); @@ -954,7 +1044,7 @@ public void testPersistentTopicList() throws Exception { @Test public void testCreateAndGetTopicProperties() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String nonPartitionedTopicName = "persistent://" + namespace + "/non-partitioned-TopicProperties"; admin.namespaces().createNamespace(namespace, 20); Map nonPartitionedTopicProperties = new HashMap<>(); @@ -975,7 +1065,7 @@ public void testCreateAndGetTopicProperties() throws Exception { @Test public void testUpdatePartitionedTopicProperties() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = "persistent://" + namespace + "/testUpdatePartitionedTopicProperties"; final String topicNameTwo = "persistent://" + namespace + "/testUpdatePartitionedTopicProperties2"; admin.namespaces().createNamespace(namespace, 20); @@ -1033,7 +1123,7 @@ public void testUpdatePartitionedTopicProperties() throws Exception { @Test public void testUpdateNonPartitionedTopicProperties() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = "persistent://" + namespace + "/testUpdateNonPartitionedTopicProperties"; admin.namespaces().createNamespace(namespace, 20); @@ -1068,7 +1158,7 @@ public void testUpdateNonPartitionedTopicProperties() throws Exception { @Test public void testNonPersistentTopics() throws Exception { - final String namespace = "prop-xyz/ns2"; + final String namespace = newUniqueName(defaultTenant + "/ns2"); final String topicName = "non-persistent://" + namespace + "/topic"; admin.namespaces().createNamespace(namespace, 20); admin.namespaces().setNamespaceReplicationClusters(namespace, Set.of("test")); @@ -1096,7 +1186,7 @@ public void testNonPersistentTopics() throws Exception { public void testPublishConsumerStats() throws Exception { final String topicName = "statTopic"; final String subscriberName = topicName + "-my-sub-1"; - final String topic = "persistent://prop-xyz/ns1/" + topicName; + final String topic = "persistent://" + defaultNamespace + "/" + topicName; final String producerName = "myProducer"; @Cleanup @@ -1143,6 +1233,8 @@ public void testPublishConsumerStats() throws Exception { @Test public void testTenantNameWithUnderscore() throws Exception { + restartClusterAfterTest(); + TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); admin.tenants().createTenant("prop_xyz", tenantInfo); @@ -1150,15 +1242,12 @@ public void testTenantNameWithUnderscore() throws Exception { String topic = "persistent://prop_xyz/use/my-namespace/my-topic"; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) 
.create(); TopicStats stats = admin.topics().getStats(topic); assertEquals(stats.getPublishers().size(), 1); - producer.close(); - - cleanup(); - setup(); } @Test @@ -1200,11 +1289,11 @@ public void testTenantWithNonexistentClusters() throws Exception { assertFalse(admin.tenants().getTenants().contains("test-tenant")); // Check existing tenant - assertTrue(admin.tenants().getTenants().contains("prop-xyz")); + assertTrue(admin.tenants().getTenants().contains(defaultTenant)); // If we try to update existing tenant with nonexistent clusters, it should fail immediately try { - admin.tenants().updateTenant("prop-xyz", tenantInfo); + admin.tenants().updateTenant(defaultTenant, tenantInfo); fail("Should have failed"); } catch (PulsarAdminException e) { assertEquals(e.getStatusCode(), Status.PRECONDITION_FAILED.getStatusCode()); @@ -1219,7 +1308,7 @@ public void brokerNamespaceIsolationPolicies() throws Exception { String cluster = pulsar.getConfiguration().getClusterName(); String namespaceRegex = "other/" + cluster + "/other.*"; String brokerName = pulsar.getAdvertisedAddress(); - String brokerAddress = brokerName + ":" + pulsar.getConfiguration().getWebServicePort().get(); + String brokerAddress = pulsar.getBrokerId(); Map parameters1 = new HashMap<>(); parameters1.put("min_limit", "1"); @@ -1227,7 +1316,7 @@ public void brokerNamespaceIsolationPolicies() throws Exception { NamespaceIsolationData nsPolicyData1 = NamespaceIsolationData.builder() .namespaces(Collections.singletonList(namespaceRegex)) - .primary(Collections.singletonList(brokerName + ":[0-9]*")) + .primary(Collections.singletonList(brokerName)) .secondary(Collections.singletonList(brokerName + ".*")) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) @@ -1264,7 +1353,7 @@ public void brokerNamespaceIsolationPolicies() throws Exception { @Test public void brokerNamespaceIsolationPoliciesUpdateOnTime() throws Exception { String brokerName = pulsar.getAdvertisedAddress(); - String ns1Name = "prop-xyz/test_ns1_iso_" + System.currentTimeMillis(); + String ns1Name = defaultTenant + "/test_ns1_iso_" + System.currentTimeMillis(); admin.namespaces().createNamespace(ns1Name, Set.of("test")); // 0. without isolation policy configured, lookup will success. @@ -1335,36 +1424,25 @@ public void clustersList() throws PulsarAdminException { */ @Test public void testClusterIsReadyBeforeCreateTopic() throws Exception { + restartClusterAfterTest(); final String topicName = "partitionedTopic"; final int partitions = 4; - final String persistentPartitionedTopicName = "persistent://prop-xyz/ns2/" + topicName; - final String NonPersistentPartitionedTopicName = "non-persistent://prop-xyz/ns2/" + topicName; + final String persistentPartitionedTopicName = "persistent://" + defaultTenant + "/ns2/" + topicName; + final String NonPersistentPartitionedTopicName = "non-persistent://" + defaultTenant + "/ns2/" + topicName; - admin.namespaces().createNamespace("prop-xyz/ns2"); + admin.namespaces().createNamespace(defaultTenant + "/ns2"); // By default the cluster will configure as configuration file. So the create topic operation // will never throw exception except there is no cluster. 
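+        // Assign the local cluster to the namespace; with no clusters configured, the topic creation below used to fail with PreconditionFailedException.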
- admin.namespaces().setNamespaceReplicationClusters("prop-xyz/ns2", new HashSet()); + admin.namespaces().setNamespaceReplicationClusters(defaultTenant + "/ns2", Sets.newHashSet(configClusterName)); - try { - admin.topics().createPartitionedTopic(persistentPartitionedTopicName, partitions); - Assert.fail("should have failed due to Namespace does not have any clusters configured"); - } catch (PulsarAdminException.PreconditionFailedException ignored) { - } - - try { - admin.topics().createPartitionedTopic(NonPersistentPartitionedTopicName, partitions); - Assert.fail("should have failed due to Namespace does not have any clusters configured"); - } catch (PulsarAdminException.PreconditionFailedException ignored) { - } - - cleanup(); - setup(); + admin.topics().createPartitionedTopic(persistentPartitionedTopicName, partitions); + admin.topics().createPartitionedTopic(NonPersistentPartitionedTopicName, partitions); } @Test public void testCreateNamespaceWithNoClusters() throws PulsarAdminException { String localCluster = pulsar.getConfiguration().getClusterName(); - String namespace = "prop-xyz/test-ns-with-no-clusters"; + String namespace = newUniqueName(defaultTenant + "/test-ns-with-no-clusters"); admin.namespaces().createNamespace(namespace); // Global cluster, if there, should be omitted from the results @@ -1377,7 +1455,7 @@ public void testConsumerStatsLastTimestamp() throws PulsarClientException, Pulsa long timestamp = System.currentTimeMillis(); final String topicName = "consumer-stats-" + timestamp; final String subscribeName = topicName + "-test-stats-sub"; - final String topic = "persistent://prop-xyz/ns1/" + topicName; + final String topic = "persistent://" + defaultNamespace + "/" + topicName; final String producerName = "producer-" + topicName; @Cleanup @@ -1480,10 +1558,9 @@ public void testConsumerStatsLastTimestamp() throws PulsarClientException, Pulsa @Test(timeOut = 30000) public void testPreciseBacklog() throws Exception { - cleanup(); - setup(); + restartClusterIfReused(); - final String topic = "persistent://prop-xyz/ns1/precise-back-log"; + final String topic = "persistent://" + defaultNamespace + "/precise-back-log"; final String subName = "sub-name"; @Cleanup @@ -1533,6 +1610,7 @@ public void testPreciseBacklog() throws Exception { @Test public void testDeleteTenant() throws Exception { + restartClusterAfterTest(); // Disabled conf: systemTopicEnabled. 
see: https://github.com/apache/pulsar/pull/17070 boolean originalSystemTopicEnabled = conf.isSystemTopicEnabled(); if (originalSystemTopicEnabled) { @@ -1542,7 +1620,7 @@ public void testDeleteTenant() throws Exception { } pulsar.getConfiguration().setForceDeleteNamespaceAllowed(false); - String tenant = "test-tenant-1"; + String tenant = newUniqueName("test-tenant-1"); assertFalse(admin.tenants().getTenants().contains(tenant)); // create tenant @@ -1588,12 +1666,6 @@ public void testDeleteTenant() throws Exception { assertFalse(pulsar.getLocalMetadataStore().exists(partitionedTopicPath).join()); assertFalse(pulsar.getLocalMetadataStore().exists(localPoliciesPath).join()); assertFalse(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); - // Reset conf: systemTopicEnabled - if (originalSystemTopicEnabled) { - cleanup(); - conf.setSystemTopicEnabled(true); - setup(); - } } @Data @@ -1628,13 +1700,16 @@ private void setNamespaceAttr(NamespaceAttr namespaceAttr){ @Test(dataProvider = "namespaceAttrs") public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { + restartClusterAfterTest(); + // Set conf. cleanup(); - NamespaceAttr originalNamespaceAttr = markOriginalNamespaceAttr(); setNamespaceAttr(namespaceAttr); + this.conf.setMetadataStoreUrl("127.0.0.1:2181"); + this.conf.setConfigurationMetadataStoreUrl("127.0.0.1:2182"); setup(); - String tenant = "test-tenant"; + String tenant = newUniqueName("test-tenant"); assertFalse(admin.tenants().getTenants().contains(tenant)); // create tenant @@ -1652,6 +1727,28 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { admin.topics().createPartitionedTopic(topic, 10); assertFalse(admin.topics().getList(namespace).isEmpty()); + final String managedLedgersPath = "/managed-ledgers/" + namespace; + final String bundleDataPath = "/loadbalance/bundle-data/" + namespace; + // Trigger bundle owned by brokers. + pulsarClient.newProducer().topic(topic).create().close(); + // Trigger bundle data write to ZK. 
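+        // Force each broker's load manager to report its rates and persist bundle data, then wait until the bundle-data znode is visible in the local metadata store.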
+ Awaitility.await().untilAsserted(() -> { + boolean bundleDataWereWriten = false; + for (PulsarService ps : new PulsarService[]{pulsar, mockPulsarSetup.getPulsar()}) { + ModularLoadManagerWrapper loadManager = (ModularLoadManagerWrapper) ps.getLoadManager().get(); + ModularLoadManagerImpl loadManagerImpl = (ModularLoadManagerImpl) loadManager.getLoadManager(); + ps.getBrokerService().updateRates(); + loadManagerImpl.updateLocalBrokerData(); + loadManagerImpl.writeBundleDataOnZooKeeper(); + bundleDataWereWriten = bundleDataWereWriten || ps.getLocalMetadataStore().exists(bundleDataPath).join(); + } + assertTrue(bundleDataWereWriten); + }); + + // assert znode exists in metadata store + assertTrue(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); + assertTrue(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); + try { admin.namespaces().deleteNamespace(namespace, false); fail("should have failed due to namespace not empty"); @@ -1668,23 +1765,14 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { assertFalse(admin.namespaces().getNamespaces(tenant).contains(namespace)); assertTrue(admin.namespaces().getNamespaces(tenant).isEmpty()); - - final String managedLedgersPath = "/managed-ledgers/" + namespace; + // assert znode deleted in metadata store assertFalse(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); - - - final String bundleDataPath = "/loadbalance/bundle-data/" + namespace; assertFalse(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); - - // Reset config - cleanup(); - setNamespaceAttr(originalNamespaceAttr); - setup(); } @Test public void testDeleteNamespaceWithTopicPolicies() throws Exception { - String tenant = "test-tenant"; + String tenant = newUniqueName("test-tenant"); assertFalse(admin.tenants().getTenants().contains(tenant)); // create tenant @@ -1732,7 +1820,7 @@ public void testDeleteNamespaceWithTopicPolicies() throws Exception { @Test(timeOut = 30000) public void testBacklogNoDelayed() throws PulsarClientException, PulsarAdminException, InterruptedException { - final String topic = "persistent://prop-xyz/ns1/precise-back-log-no-delayed-" + UUID.randomUUID().toString(); + final String topic = "persistent://" + defaultNamespace + "/precise-back-log-no-delayed-" + UUID.randomUUID().toString(); final String subName = "sub-name"; @Cleanup @@ -1788,7 +1876,7 @@ public void testBacklogNoDelayed() throws PulsarClientException, PulsarAdminExce @Test public void testPreciseBacklogForPartitionedTopic() throws PulsarClientException, PulsarAdminException { - final String topic = "persistent://prop-xyz/ns1/precise-back-log-for-partitioned-topic"; + final String topic = "persistent://" + defaultNamespace + "/precise-back-log-for-partitioned-topic"; admin.topics().createPartitionedTopic(topic, 2); final String subName = "sub-name"; @@ -1830,7 +1918,7 @@ public void testPreciseBacklogForPartitionedTopic() throws PulsarClientException @Test(timeOut = 30000) public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientException, PulsarAdminException, InterruptedException { - final String topic = "persistent://prop-xyz/ns1/precise-back-log-no-delayed-partitioned-topic"; + final String topic = "persistent://" + defaultNamespace + "/precise-back-log-no-delayed-partitioned-topic"; admin.topics().createPartitionedTopic(topic, 2); final String subName = "sub-name"; @@ -1845,6 +1933,8 @@ public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientExcepti .acknowledgmentGroupTime(0, 
TimeUnit.SECONDS) .subscribe(); + long start1 = 0; + long start2 = 0; @Cleanup Producer producer = client.newProducer() .topic(topic) @@ -1852,6 +1942,12 @@ public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientExcepti .create(); for (int i = 0; i < 10; i++) { + if (i == 0) { + start1 = Clock.systemUTC().millis(); + } + if (i == 5) { + start2 = Clock.systemUTC().millis(); + } if (i > 4) { producer.newMessage() .value("message-1".getBytes(StandardCharsets.UTF_8)) @@ -1862,29 +1958,34 @@ public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientExcepti } } // wait until the message add to delay queue. + long finalStart1 = start1; Awaitility.await().untilAsserted(() -> { TopicStats topicStats = admin.topics().getPartitionedStats(topic, false, true, true, true); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 440); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklogNoDelayed(), 5); + assertTrue(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog() >= finalStart1); }); for (int i = 0; i < 5; i++) { consumer.acknowledge(consumer.receive()); } // Wait the ack send. - Awaitility.await().untilAsserted(() -> { + long finalStart2 = start2; + Awaitility.await().timeout(1, MINUTES).untilAsserted(() -> { TopicStats topicStats2 = admin.topics().getPartitionedStats(topic, false, true, true, true); assertEquals(topicStats2.getSubscriptions().get(subName).getMsgBacklog(), 5); assertEquals(topicStats2.getSubscriptions().get(subName).getBacklogSize(), 223); assertEquals(topicStats2.getSubscriptions().get(subName).getMsgBacklogNoDelayed(), 0); + assertTrue(topicStats2.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog() >= finalStart2); }); } @Test public void testMaxNumPartitionsPerPartitionedTopicSuccess() { - final String topic = "persistent://prop-xyz/ns1/max-num-partitions-per-partitioned-topic-success"; + restartClusterAfterTest(); + final String topic = "persistent://" + defaultNamespace + "/max-num-partitions-per-partitioned-topic-success"; pulsar.getConfiguration().setMaxNumPartitionsPerPartitionedTopic(3); try { @@ -1899,7 +2000,8 @@ public void testMaxNumPartitionsPerPartitionedTopicSuccess() { @Test public void testMaxNumPartitionsPerPartitionedTopicFailure() { - final String topic = "persistent://prop-xyz/ns1/max-num-partitions-per-partitioned-topic-failure"; + restartClusterAfterTest(); + final String topic = "persistent://" + defaultNamespace + "/max-num-partitions-per-partitioned-topic-failure"; pulsar.getConfiguration().setMaxNumPartitionsPerPartitionedTopic(2); try { @@ -1916,21 +2018,24 @@ public void testMaxNumPartitionsPerPartitionedTopicFailure() { @Test public void testListOfNamespaceBundles() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); - admin.tenants().createTenant("prop-xyz2", tenantInfo); - admin.namespaces().createNamespace("prop-xyz2/ns1", 10); - admin.namespaces().setNamespaceReplicationClusters("prop-xyz2/ns1", Set.of("test")); - admin.namespaces().createNamespace("prop-xyz2/test/ns2", 10); - assertEquals(admin.namespaces().getBundles("prop-xyz2/ns1").getNumBundles(), 10); - assertEquals(admin.namespaces().getBundles("prop-xyz2/test/ns2").getNumBundles(), 10); + String tenantName = newUniqueName("prop-xyz2"); + admin.tenants().createTenant(tenantName, tenantInfo); + admin.namespaces().createNamespace(tenantName + "/ns1", 
10); + admin.namespaces().setNamespaceReplicationClusters(tenantName + "/ns1", Set.of("test")); + admin.namespaces().createNamespace(tenantName + "/test/ns2", 10); + assertEquals(admin.namespaces().getBundles(tenantName + "/ns1").getNumBundles(), 10); + assertEquals(admin.namespaces().getBundles(tenantName + "/test/ns2").getNumBundles(), 10); - admin.namespaces().deleteNamespace("prop-xyz2/test/ns2"); + admin.namespaces().deleteNamespace(tenantName + "/test/ns2"); } @Test public void testForceDeleteNamespace() throws Exception { - final String namespaceName = "prop-xyz2/ns1"; + restartClusterAfterTest(); + String tenantName = newUniqueName("prop-xyz2"); + final String namespaceName = tenantName + "/ns1"; TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); - admin.tenants().createTenant("prop-xyz2", tenantInfo); + admin.tenants().createTenant(tenantName, tenantInfo); admin.namespaces().createNamespace(namespaceName, 1); final String topic = "persistent://" + namespaceName + "/test" + UUID.randomUUID(); pulsarClient.newProducer(Schema.DOUBLE).topic(topic).create().close(); @@ -1942,17 +2047,15 @@ public void testForceDeleteNamespace() throws Exception { } catch (PulsarAdminException e) { assertEquals(e.getStatusCode(), 404); } - - cleanup(); - setup(); } @Test public void testForceDeleteNamespaceWithAutomaticTopicCreation() throws Exception { conf.setForceDeleteNamespaceAllowed(true); - final String namespaceName = "prop-xyz2/ns1"; + String tenantName = newUniqueName("prop-xyz2"); + final String namespaceName = tenantName + "/ns1"; TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); - admin.tenants().createTenant("prop-xyz2", tenantInfo); + admin.tenants().createTenant(tenantName, tenantInfo); admin.namespaces().createNamespace(namespaceName, 1); admin.namespaces().setAutoTopicCreation(namespaceName, AutoTopicCreationOverride.builder() @@ -1977,7 +2080,7 @@ public void testForceDeleteNamespaceWithAutomaticTopicCreation() throws Exceptio // the consumer will try to re-create the partitions admin.namespaces().deleteNamespace(namespaceName, true); - assertFalse(admin.namespaces().getNamespaces("prop-xyz2").contains("ns1")); + assertFalse(admin.namespaces().getNamespaces(tenantName).contains("ns1")); } } @@ -2000,6 +2103,7 @@ public void testUpdateClusterWithProxyUrl() throws Exception { @Test public void testMaxNamespacesPerTenant() throws Exception { + restartClusterAfterTest(); cleanup(); conf.setMaxNamespacesPerTenant(2); setup(); @@ -2013,19 +2117,11 @@ public void testMaxNamespacesPerTenant() throws Exception { Assert.assertEquals(e.getStatusCode(), 412); Assert.assertEquals(e.getHttpError(), "Exceed the maximum number of namespace in tenant :testTenant"); } - - //unlimited - cleanup(); - conf.setMaxNamespacesPerTenant(0); - setup(); - admin.tenants().createTenant("testTenant", tenantInfo); - for (int i = 0; i < 10; i++) { - admin.namespaces().createNamespace("testTenant/ns-" + i, Set.of("test")); - } } @Test public void testAutoTopicCreationOverrideWithMaxNumPartitionsLimit() throws Exception{ + restartClusterAfterTest(); cleanup(); conf.setMaxNumPartitionsPerPartitionedTopic(10); setup(); @@ -2067,6 +2163,7 @@ public void testAutoTopicCreationOverrideWithMaxNumPartitionsLimit() throws Exce } @Test public void testMaxTopicsPerNamespace() throws Exception { + restartClusterAfterTest(); cleanup(); conf.setMaxTopicsPerNamespace(10); setup(); @@ -2158,16 +2255,12 @@ public void testMaxTopicsPerNamespace() throws 
Exception { } catch (PulsarClientException e) { log.info("Exception: ", e); } - - // reset configuration - conf.setMaxTopicsPerNamespace(0); - conf.setDefaultNumPartitions(1); } @Test public void testInvalidBundleErrorResponse() throws Exception { try { - admin.namespaces().deleteNamespaceBundle("prop-xyz/ns1", "invalid-bundle"); + admin.namespaces().deleteNamespaceBundle(defaultNamespace, "invalid-bundle"); fail("should have failed due to invalid bundle"); } catch (PreconditionFailedException e) { assertTrue(e.getMessage().startsWith("Invalid bundle range")); @@ -2176,6 +2269,7 @@ public void testInvalidBundleErrorResponse() throws Exception { @Test public void testMaxSubscriptionsPerTopic() throws Exception { + restartClusterAfterTest(); cleanup(); conf.setMaxSubscriptionsPerTopic(2); setup(); @@ -2258,7 +2352,7 @@ public void testMaxSubscriptionsPerTopic() throws Exception { @Test(timeOut = 30000) public void testMaxSubPerTopicApi() throws Exception { - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Set.of("test")); assertNull(admin.namespaces().getMaxSubscriptionsPerTopic(myNamespace)); @@ -2281,8 +2375,11 @@ public void testMaxSubPerTopicApi() throws Exception { } } - @Test(timeOut = 30000) + @Test(timeOut = 60000) public void testSetNamespaceEntryFilters() throws Exception { + restartClusterAfterTest(); + restartClusterIfReused(); + @Cleanup final MockEntryFilterProvider testEntryFilterProvider = new MockEntryFilterProvider(conf); @@ -2299,7 +2396,7 @@ public void testSetNamespaceEntryFilters() throws Exception { try { EntryFilters entryFilters = new EntryFilters("test"); - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); final String topicName = myNamespace + "/topic"; admin.topics().createNonPartitionedTopic(topicName); @@ -2359,6 +2456,9 @@ public void testSetNamespaceEntryFilters() throws Exception { @Test(dataProvider = "topicType") public void testSetTopicLevelEntryFilters(String topicType) throws Exception { + restartClusterAfterTest(); + restartClusterIfReused(); + @Cleanup final MockEntryFilterProvider testEntryFilterProvider = new MockEntryFilterProvider(conf); @@ -2373,7 +2473,7 @@ public void testSetTopicLevelEntryFilters(String topicType) throws Exception { "entryFilterProvider", testEntryFilterProvider, true); try { EntryFilters entryFilters = new EntryFilters("test"); - final String topic = topicType + "://prop-xyz/ns1/test-schema-validation-enforced"; + final String topic = topicType + "://" + defaultNamespace + "/test-schema-validation-enforced"; admin.topics().createPartitionedTopic(topic, 1); final String fullTopicName = topic + TopicName.PARTITIONED_TOPIC_SUFFIX + 0; @Cleanup @@ -2431,13 +2531,13 @@ public void testSetTopicLevelEntryFilters(String topicType) throws Exception { } } - @Test(timeOut = 30000) + @Test(timeOut = 60000) public void testSetEntryFiltersHierarchy() throws Exception { + restartClusterAfterTest(); + restartClusterIfReused(); + @Cleanup final MockEntryFilterProvider testEntryFilterProvider = new MockEntryFilterProvider(conf); - conf.setEntryFilterNames(List.of("test", "test1")); - conf.setAllowOverrideEntryFilters(true); - testEntryFilterProvider.setMockEntryFilters(new EntryFilterDefinition( "test", null, @@ -2450,9 +2550,11 @@ public void 
testSetEntryFiltersHierarchy() throws Exception { final EntryFilterProvider oldEntryFilterProvider = pulsar.getBrokerService().getEntryFilterProvider(); FieldUtils.writeField(pulsar.getBrokerService(), "entryFilterProvider", testEntryFilterProvider, true); + conf.setEntryFilterNames(List.of("test", "test1")); + conf.setAllowOverrideEntryFilters(true); try { - final String topic = "persistent://prop-xyz/ns1/test-schema-validation-enforced"; + final String topic = "persistent://" + defaultNamespace + "/test-schema-validation-enforced"; admin.topics().createPartitionedTopic(topic, 1); final String fullTopicName = topic + TopicName.PARTITIONED_TOPIC_SUFFIX + 0; @Cleanup @@ -2460,8 +2562,9 @@ public void testSetEntryFiltersHierarchy() throws Exception { .topic(fullTopicName) .create(); assertNull(admin.topicPolicies().getEntryFiltersPerTopic(topic, false)); - assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), - new EntryFilters("test,test1")); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), + new EntryFilters("test,test1"))); assertEquals(pulsar .getBrokerService() .getTopic(fullTopicName, false) @@ -2471,10 +2574,11 @@ public void testSetEntryFiltersHierarchy() throws Exception { .size(), 2); EntryFilters nsEntryFilters = new EntryFilters("test"); - admin.namespaces().setNamespaceEntryFilters("prop-xyz/ns1", nsEntryFilters); - assertEquals(admin.namespaces().getNamespaceEntryFilters("prop-xyz/ns1"), nsEntryFilters); - assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), - new EntryFilters("test")); + admin.namespaces().setNamespaceEntryFilters(defaultNamespace, nsEntryFilters); + assertEquals(admin.namespaces().getNamespaceEntryFilters(defaultNamespace), nsEntryFilters); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), + new EntryFilters("test"))); Awaitility.await().untilAsserted(() -> { assertEquals(pulsar .getBrokerService() @@ -2503,8 +2607,9 @@ public void testSetEntryFiltersHierarchy() throws Exception { admin.topicPolicies().setEntryFiltersPerTopic(topic, topicEntryFilters); Awaitility.await().untilAsserted(() -> assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, false), topicEntryFilters)); - assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), - new EntryFilters("test1")); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topicPolicies().getEntryFiltersPerTopic(topic, true), + new EntryFilters("test1"))); Awaitility.await().untilAsserted(() -> { assertEquals(pulsar .getBrokerService() @@ -2530,8 +2635,11 @@ public void testSetEntryFiltersHierarchy() throws Exception { } } - @Test(timeOut = 30000) + @Test(timeOut = 60000) public void testValidateNamespaceEntryFilters() throws Exception { + restartClusterAfterTest(); + restartClusterIfReused(); + @Cleanup final MockEntryFilterProvider testEntryFilterProvider = new MockEntryFilterProvider(conf); @@ -2546,7 +2654,7 @@ public void testValidateNamespaceEntryFilters() throws Exception { "entryFilterProvider", testEntryFilterProvider, true); try { - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); try { admin.namespaces().setNamespaceEntryFilters(myNamespace, new EntryFilters("notexists")); @@ -2585,8 +2693,11 @@ public void testValidateNamespaceEntryFilters() throws 
Exception { } } - @Test(timeOut = 30000) + @Test(timeOut = 60000) public void testValidateTopicEntryFilters() throws Exception { + restartClusterAfterTest(); + restartClusterIfReused(); + @Cleanup final MockEntryFilterProvider testEntryFilterProvider = new MockEntryFilterProvider(conf); @@ -2601,7 +2712,7 @@ public void testValidateTopicEntryFilters() throws Exception { "entryFilterProvider", testEntryFilterProvider, true); try { - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); final String topicName = myNamespace + "/topic"; admin.topics().createNonPartitionedTopic(topicName); @@ -2648,8 +2759,9 @@ public void testValidateTopicEntryFilters() throws Exception { @Test(timeOut = 30000) public void testMaxSubPerTopic() throws Exception { + restartClusterAfterTest(); pulsar.getConfiguration().setMaxSubscriptionsPerTopic(0); - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Set.of("test")); final String topic = "persistent://" + myNamespace + "/testMaxSubPerTopic"; pulsarClient.newProducer().topic(topic).create().close(); @@ -2689,12 +2801,13 @@ public void testMaxSubPerTopic() throws Exception { @Test(timeOut = 30000) public void testMaxSubPerTopicPriority() throws Exception { + restartClusterAfterTest(); final int brokerLevelMaxSub = 2; cleanup(); conf.setMaxSubscriptionsPerTopic(brokerLevelMaxSub); setup(); - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Set.of("test")); final String topic = "persistent://" + myNamespace + "/testMaxSubPerTopic"; //Create a client that can fail quickly @@ -2746,12 +2859,13 @@ public void testMaxSubPerTopicPriority() throws Exception { @Test public void testMaxProducersPerTopicUnlimited() throws Exception { + restartClusterAfterTest(); final int maxProducersPerTopic = 1; cleanup(); conf.setMaxProducersPerTopic(maxProducersPerTopic); setup(); //init namespace - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Set.of("test")); final String topic = "persistent://" + myNamespace + "/testMaxProducersPerTopicUnlimited"; //the policy is set to 0, so there will be no restrictions @@ -2797,12 +2911,13 @@ public void testMaxProducersPerTopicUnlimited() throws Exception { @Test public void testMaxConsumersPerTopicUnlimited() throws Exception { + restartClusterAfterTest(); final int maxConsumersPerTopic = 1; cleanup(); conf.setMaxConsumersPerTopic(maxConsumersPerTopic); setup(); //init namespace - final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); + final String myNamespace = newUniqueName(defaultTenant + "/ns"); admin.namespaces().createNamespace(myNamespace, Set.of("test")); final String topic = "persistent://" + myNamespace + "/testMaxConsumersPerTopicUnlimited"; @@ -2854,7 +2969,7 @@ public void testMaxConsumersPerTopicUnlimited() throws Exception { @Test public void testClearBacklogForTheSubscriptionThatNoConsumers() throws Exception { - final String topic = "persistent://prop-xyz/ns1/clear_backlog_no_consumers" + UUID.randomUUID().toString(); + final String topic = "persistent://" + defaultNamespace + 
"/clear_backlog_no_consumers" + UUID.randomUUID().toString(); final String sub = "my-sub"; admin.topics().createNonPartitionedTopic(topic); admin.topics().createSubscription(topic, sub, MessageId.earliest); @@ -2863,7 +2978,10 @@ public void testClearBacklogForTheSubscriptionThatNoConsumers() throws Exception @Test(timeOut = 200000) public void testCompactionApi() throws Exception { - final String namespace = "prop-xyz/ns1"; + final String namespace = newUniqueName(defaultTenant + "/ns"); + admin.namespaces().createNamespace(namespace, Set.of("test")); + + assertNull(admin.namespaces().getCompactionThreshold(namespace)); assertEquals(pulsar.getConfiguration().getBrokerServiceCompactionThresholdInBytes(), 0); @@ -2878,11 +2996,13 @@ public void testCompactionApi() throws Exception { @Test(timeOut = 200000) public void testCompactionPriority() throws Exception { + restartClusterAfterTest(); cleanup(); conf.setBrokerServiceCompactionMonitorIntervalInSeconds(10000); setup(); - final String topic = "persistent://prop-xyz/ns1/topic" + UUID.randomUUID(); - final String namespace = "prop-xyz/ns1"; + final String namespace = newUniqueName(defaultTenant + "/ns"); + admin.namespaces().createNamespace(namespace, Set.of("test")); + final String topic = "persistent://" + namespace + "/topic" + UUID.randomUUID(); pulsarClient.newProducer().topic(topic).create().close(); TopicName topicName = TopicName.get(topic); PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(topic).get().get(); @@ -2921,7 +3041,8 @@ public void testCompactionPriority() throws Exception { @Test public void testProperties() throws Exception { - final String namespace = "prop-xyz/ns1"; + final String namespace = newUniqueName(defaultTenant + "/ns"); + admin.namespaces().createNamespace(namespace, Set.of("test")); admin.namespaces().setProperty(namespace, "a", "a"); assertEquals("a", admin.namespaces().getProperty(namespace, "a")); assertNull(admin.namespaces().getProperty(namespace, "b")); @@ -2944,7 +3065,7 @@ public void testProperties() throws Exception { @Test public void testGetListInBundle() throws Exception { - final String namespace = "prop-xyz/ns11"; + final String namespace = defaultTenant + "/ns11"; admin.namespaces().createNamespace(namespace, 3); final String persistentTopicName = TopicName.get( @@ -2978,7 +3099,8 @@ public void testGetListInBundle() throws Exception { @Test public void testGetTopicsWithDifferentMode() throws Exception { - final String namespace = "prop-xyz/ns1"; + final String namespace = newUniqueName(defaultTenant + "/ns"); + admin.namespaces().createNamespace(namespace, Set.of("test")); final String persistentTopicName = TopicName .get("persistent", NamespaceName.get(namespace), "get_topics_mode_" + UUID.randomUUID().toString()) @@ -3021,16 +3143,14 @@ public void testGetTopicsWithDifferentMode() throws Exception { @Test(dataProvider = "isV1") public void testNonPartitionedTopic(boolean isV1) throws Exception { - String tenant = "prop-xyz"; + restartClusterAfterTest(); + String tenant = defaultTenant; String cluster = "test"; String namespace = tenant + "/" + (isV1 ? 
cluster + "/" : "") + "n1" + isV1; String topic = "persistent://" + namespace + "/t1" + isV1; admin.namespaces().createNamespace(namespace, Set.of(cluster)); admin.topics().createNonPartitionedTopic(topic); assertTrue(admin.topics().getList(namespace).contains(topic)); - - cleanup(); - setup(); } /** @@ -3043,7 +3163,7 @@ public void testFailedUpdatePartitionedTopic() throws Exception { final String subName1 = topicName + "-my-sub-1"; final int startPartitions = 4; final int newPartitions = 8; - final String partitionedTopicName = "persistent://prop-xyz/ns1/" + topicName; + final String partitionedTopicName = "persistent://" + defaultNamespace + "/" + topicName; URL pulsarUrl = new URL(pulsar.getWebServiceAddress()); @@ -3062,7 +3182,7 @@ public void testFailedUpdatePartitionedTopic() throws Exception { admin.topics().createSubscription(partitionedTopicName + "-partition-" + startPartitions, subName1, MessageId.earliest); fail("Unexpected behaviour"); - } catch (PulsarAdminException.PreconditionFailedException ex) { + } catch (PulsarAdminException.ConflictException ex) { // OK } @@ -3077,13 +3197,37 @@ public void testFailedUpdatePartitionedTopic() throws Exception { assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, newPartitions); } + /** + * Validate retring failed partitioned topic should succeed. + * @throws Exception + */ + @Test + public void testTopicStatsWithEarliestTimeInBacklogIfNoBacklog() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + final String subscriptionName = "s1"; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics().createSubscription(topicName, subscriptionName, MessageId.earliest); + + // Send one message. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).enableBatching(false) + .create(); + MessageIdImpl messageId = (MessageIdImpl) producer.send("123"); + // Catch up. + admin.topics().skipAllMessages(topicName, subscriptionName); + // Get topic stats with earliestTimeInBacklog + TopicStats topicStats = admin.topics().getStats(topicName, false, false, true); + assertEquals(topicStats.getSubscriptions().get(subscriptionName).getEarliestMsgPublishTimeInBacklog(), -1L); + + // cleanup. 
+ producer.close(); + admin.topics().delete(topicName); + } + @Test(dataProvider = "topicType") public void testPartitionedStatsAggregationByProducerName(String topicType) throws Exception { - cleanup(); - setup(); - + restartClusterIfReused(); conf.setAggregatePublisherStatsByProducerName(true); - final String topic = topicType + "://prop-xyz/ns1/test-partitioned-stats-aggregation-by-producer-name"; + final String topic = topicType + "://" + defaultNamespace + "/test-partitioned-stats-aggregation-by-producer-name"; admin.topics().createPartitionedTopic(topic, 10); @Cleanup @@ -3137,8 +3281,9 @@ public int choosePartition(Message msg, TopicMetadata metadata) { @Test(dataProvider = "topicType") public void testPartitionedStatsAggregationByProducerNamePerPartition(String topicType) throws Exception { + restartClusterIfReused(); conf.setAggregatePublisherStatsByProducerName(true); - final String topic = topicType + "://prop-xyz/ns1/test-partitioned-stats-aggregation-by-producer-name-per-pt"; + final String topic = topicType + "://" + defaultNamespace + "/test-partitioned-stats-aggregation-by-producer-name-per-pt"; admin.topics().createPartitionedTopic(topic, 2); @Cleanup @@ -3161,7 +3306,7 @@ public void testPartitionedStatsAggregationByProducerNamePerPartition(String top @Test(dataProvider = "topicType") public void testSchemaValidationEnforced(String topicType) throws Exception { - final String topic = topicType + "://prop-xyz/ns1/test-schema-validation-enforced"; + final String topic = topicType + "://" + defaultNamespace + "/test-schema-validation-enforced"; admin.topics().createPartitionedTopic(topic, 1); @Cleanup Producer producer1 = pulsarClient.newProducer() @@ -3177,31 +3322,31 @@ public void testSchemaValidationEnforced(String topicType) throws Exception { @Test public void testGetNamespaceTopicList() throws Exception { - final String persistentTopic = "persistent://prop-xyz/ns1/testGetNamespaceTopicList"; - final String nonPersistentTopic = "non-persistent://prop-xyz/ns1/non-testGetNamespaceTopicList"; - final String eventTopic = "persistent://prop-xyz/ns1/__change_events"; + final String persistentTopic = "persistent://" + defaultNamespace + "/testGetNamespaceTopicList"; + final String nonPersistentTopic = "non-persistent://" + defaultNamespace + "/non-testGetNamespaceTopicList"; + final String eventTopic = "persistent://" + defaultNamespace + "/__change_events"; admin.topics().createNonPartitionedTopic(persistentTopic); Awaitility.await().untilAsserted(() -> - admin.namespaces().getTopics("prop-xyz/ns1", + admin.namespaces().getTopics(defaultNamespace, ListNamespaceTopicsOptions.builder().mode(Mode.PERSISTENT).includeSystemTopic(true).build()) .contains(eventTopic)); - List notIncludeSystemTopics = admin.namespaces().getTopics("prop-xyz/ns1", + List notIncludeSystemTopics = admin.namespaces().getTopics(defaultNamespace, ListNamespaceTopicsOptions.builder().includeSystemTopic(false).build()); Assert.assertFalse(notIncludeSystemTopics.contains(eventTopic)); @Cleanup Producer producer = pulsarClient.newProducer() .topic(nonPersistentTopic) .create(); - List notPersistentTopics = admin.namespaces().getTopics("prop-xyz/ns1", + List notPersistentTopics = admin.namespaces().getTopics(defaultNamespace, ListNamespaceTopicsOptions.builder().mode(Mode.NON_PERSISTENT).build()); Assert.assertTrue(notPersistentTopics.contains(nonPersistentTopic)); } @Test private void testTerminateSystemTopic() throws Exception { - final String topic = "persistent://prop-xyz/ns1/testTerminateSystemTopic"; + 
final String topic = "persistent://" + defaultNamespace + "/testTerminateSystemTopic"; admin.topics().createNonPartitionedTopic(topic); - final String eventTopic = "persistent://prop-xyz/ns1/__change_events"; + final String eventTopic = "persistent://" + defaultNamespace + "/__change_events"; admin.topicPolicies().setMaxConsumers(topic, 2); Awaitility.await().untilAsserted(() -> { Assert.assertEquals(admin.topicPolicies().getMaxConsumers(topic), Integer.valueOf(2)); @@ -3213,12 +3358,91 @@ private void testTerminateSystemTopic() throws Exception { @Test private void testDeleteNamespaceForciblyWithManyTopics() throws Exception { - final String ns = "prop-xyz/ns-testDeleteNamespaceForciblyWithManyTopics"; + final String ns = defaultTenant + "/ns-testDeleteNamespaceForciblyWithManyTopics"; admin.namespaces().createNamespace(ns, 2); for (int i = 0; i < 100; i++) { admin.topics().createPartitionedTopic(String.format("persistent://%s", ns + "/topic" + i), 3); } admin.namespaces().deleteNamespace(ns, true); - Assert.assertFalse(admin.namespaces().getNamespaces("prop-xyz").contains(ns)); + Assert.assertFalse(admin.namespaces().getNamespaces(defaultTenant).contains(ns)); + } + + @Test + private void testSetBacklogQuotasNamespaceLevelIfRetentionExists() throws Exception { + final String ns = defaultTenant + "/ns-testSetBacklogQuotasNamespaceLevel"; + final long backlogQuotaLimitSize = 100000002; + final int backlogQuotaLimitTime = 2; + admin.namespaces().createNamespace(ns, 2); + // create retention. + admin.namespaces().setRetention(ns, new RetentionPolicies(1800, 10000)); + // set backlog quota. + admin.namespaces().setBacklogQuota(ns, BacklogQuota.builder() + .limitSize(backlogQuotaLimitSize).limitTime(backlogQuotaLimitTime).build()); + // Verify result. + Map map = admin.namespaces().getBacklogQuotaMap(ns); + assertEquals(map.size(), 1); + assertTrue(map.containsKey(BacklogQuota.BacklogQuotaType.destination_storage)); + BacklogQuota backlogQuota = map.get(BacklogQuota.BacklogQuotaType.destination_storage); + assertEquals(backlogQuota.getLimitSize(), backlogQuotaLimitSize); + assertEquals(backlogQuota.getLimitTime(), backlogQuotaLimitTime); + // cleanup. + admin.namespaces().deleteNamespace(ns); + } + + @Test + private void testAnalyzeSubscriptionBacklogNotCauseStuck() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp"); + final String subscription = "s1"; + admin.topics().createNonPartitionedTopic(topic); + // Send 10 messages. + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).topic(topic).subscriptionName(subscription) + .receiverQueueSize(0).subscribe(); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).create(); + for (int i = 0; i < 10; i++) { + producer.send(i + ""); + } + + // Verify consumer can receive all messages after calling "analyzeSubscriptionBacklog". + admin.topics().analyzeSubscriptionBacklog(topic, subscription, Optional.of(MessageIdImpl.earliest)); + for (int i = 0; i < 10; i++) { + Awaitility.await().untilAsserted(() -> { + Message m = consumer.receive(); + assertNotNull(m); + consumer.acknowledge(m); + }); + } + + // cleanup. + consumer.close(); + producer.close(); + admin.topics().delete(topic); + } + + @Test + public void testGetStatsIfPartitionNotExists() throws Exception { + // create topic. 
+ final String partitionedTp = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp"); + admin.topics().createPartitionedTopic(partitionedTp, 1); + TopicName partition0 = TopicName.get(partitionedTp).getPartition(0); + boolean topicExists1 = pulsar.getBrokerService().getTopic(partition0.toString(), false).join().isPresent(); + assertTrue(topicExists1); + // Verify topics-stats works. + TopicStats topicStats = admin.topics().getStats(partition0.toString()); + assertNotNull(topicStats); + + // Delete partition and call topic-stats again. + admin.topics().delete(partition0.toString()); + boolean topicExists2 = pulsar.getBrokerService().getTopic(partition0.toString(), false).join().isPresent(); + assertFalse(topicExists2); + // Verify: respond 404. + try { + admin.topics().getStats(partition0.toString()); + fail("Should respond 404 after the partition was deleted"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("Topic partitions were not yet created")); + } + + // cleanup. + admin.topics().deletePartitionedTopic(partitionedTp); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java index 3d2a6b934f847..27d72f98c2c49 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java @@ -168,7 +168,7 @@ public Map, Collection>> register(Object callback, Object... c testNamespace, "my-topic", true); } catch (Exception e) { //System.out.println(e.getMessage()); - Assert.assertEquals("Topic not found", e.getMessage()); + Assert.assertTrue(e.getMessage().contains("Topic not found")); } String key = "legendtkl"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java index a780f889de85f..357422b11f6ce 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertTrue; import java.lang.management.ManagementFactory; import java.lang.management.ThreadMXBean; +import java.lang.reflect.Field; import java.time.Duration; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -31,13 +32,21 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ProducerBuilderImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.TopicVersion; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.springframework.util.CollectionUtils; import 
org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -236,4 +245,58 @@ public void testHealthCheckupV2() throws Exception { )) ); } + + class DummyProducerBuilder extends ProducerBuilderImpl { + // This is a dummy producer builder to test the health check timeout + // the producer constructed by this builder will not send any message + public DummyProducerBuilder(PulsarClientImpl client, Schema schema) { + super(client, schema); + } + + @Override + public CompletableFuture> createAsync() { + CompletableFuture> future = new CompletableFuture<>(); + super.createAsync().thenAccept(producer -> { + Producer spyProducer = Mockito.spy(producer); + Mockito.doReturn(CompletableFuture.completedFuture(MessageId.earliest)) + .when(spyProducer).sendAsync(Mockito.any()); + future.complete(spyProducer); + }).exceptionally(ex -> { + future.completeExceptionally(ex); + return null; + }); + return future; + } + } + + @Test + public void testHealthCheckTimeOut() throws Exception { + final String testHealthCheckTopic = String.format("persistent://pulsar/localhost:%s/healthcheck", + pulsar.getConfig().getWebServicePort().get()); + PulsarClient client = pulsar.getClient(); + PulsarClient spyClient = Mockito.spy(client); + Mockito.doReturn(new DummyProducerBuilder<>((PulsarClientImpl) spyClient, Schema.BYTES)) + .when(spyClient).newProducer(Schema.STRING); + // use reflection to replace the client in the broker + Field field = PulsarService.class.getDeclaredField("client"); + field.setAccessible(true); + field.set(pulsar, spyClient); + try { + admin.brokers().healthcheck(TopicVersion.V2); + throw new Exception("Should not reach here"); + } catch (PulsarAdminException e) { + log.info("Exception caught", e); + assertTrue(e.getMessage().contains("LowOverheadTimeoutException")); + } + // To ensure we don't have any subscription, the producers and readers are closed. + Awaitility.await().untilAsserted(() -> + assertTrue(CollectionUtils.isEmpty(admin.topics() + .getSubscriptions(testHealthCheckTopic).stream() + // All system topics are using compaction, even though is not explicitly set in the policies. 
+ .filter(v -> !v.equals(Compactor.COMPACTION_SUBSCRIPTION)) + .collect(Collectors.toList()) + )) + ); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java index 13cac6b5905e0..b0f611317f8af 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java @@ -81,9 +81,8 @@ public void testGetLeaderBroker() assertTrue(leaderBroker.isPresent()); log.info("Leader broker is {}", leaderBroker); for (PulsarAdmin admin : getAllAdmins()) { - String serviceUrl = admin.brokers().getLeaderBroker().getServiceUrl(); - log.info("Pulsar admin get leader broker is {}", serviceUrl); - assertEquals(leaderBroker.get().getServiceUrl(), serviceUrl); + String brokerId = admin.brokers().getLeaderBroker().getBrokerId(); + assertEquals(leaderBroker.get().getBrokerId(), brokerId); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java index e8e582f80b0dc..f67bd6fcfce5b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java @@ -49,6 +49,8 @@ import org.apache.pulsar.common.policies.data.SchemaAutoUpdateCompatibilityStrategy; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.protocol.schema.IsCompatibilityResponse; +import org.apache.pulsar.common.protocol.schema.PostSchemaPayload; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaInfoWithVersion; import org.apache.pulsar.common.schema.SchemaType; @@ -185,17 +187,6 @@ private void testSchemaInfoApi(Schema schema, } - @Test - public void testCreateBytesSchema() { - // forbid admin api creating BYTES schema to be consistent with client side - try { - testSchemaInfoApi(Schema.BYTES, "schematest/test/test-BYTES"); - fail("should fail"); - } catch (Exception e) { - assertTrue(e.getMessage().contains("Do not upload a BYTES schema")); - } - } - @Test(dataProvider = "version") public void testPostSchemaCompatibilityStrategy(ApiVersion version) throws PulsarAdminException { String namespace = format("%s%s%s", "schematest", (ApiVersion.V1.equals(version) ? 
"/" + cluster + "/" : "/"), @@ -449,4 +440,31 @@ public void testGetSchemaCompatibilityStrategyWhenSetBrokerLevelAndSchemaAutoUpd admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), SchemaCompatibilityStrategy.UNDEFINED)); } + + @Test + public void testCompatibility() throws Exception { + String topicName = schemaCompatibilityNamespace + "/testCompatibility"; + try { + admin.schemas().getSchemaInfo(topicName); + fail(); + } catch (PulsarAdminException.NotFoundException e) { + assertEquals(e.getMessage(), "Schema not found"); + } + Map properties = new HashMap<>(); + PostSchemaPayload postSchemaPayload = new PostSchemaPayload("STRING", "", properties); + admin.schemas().createSchema(topicName, postSchemaPayload); + IsCompatibilityResponse isCompatibilityResponse = + admin.schemas().testCompatibility(topicName, postSchemaPayload); + + assertTrue(isCompatibilityResponse.isCompatibility()); + assertEquals(isCompatibilityResponse.getSchemaCompatibilityStrategy(), SchemaCompatibilityStrategy.FULL.name()); + postSchemaPayload = new PostSchemaPayload("INT8", "", properties); + try { + admin.schemas().testCompatibility(topicName, postSchemaPayload); + fail(); + } catch (Exception e) { + assertTrue(e instanceof PulsarAdminException.ServerSideErrorException); + assertTrue(e.getMessage().contains("Incompatible schema: exists schema type STRING, new schema type INT8")); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java index e33108203cc81..b5ce948725f71 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java @@ -55,11 +55,13 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import javax.ws.rs.client.InvocationCallback; import javax.ws.rs.client.WebTarget; import javax.ws.rs.core.Response.Status; import lombok.Builder; import lombok.Cleanup; +import lombok.SneakyThrows; import lombok.Value; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; @@ -90,6 +92,7 @@ import org.apache.pulsar.client.api.ConsumerCryptoFailureAction; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageListener; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; @@ -154,9 +157,6 @@ public class AdminApiTest extends MockedPulsarServiceBaseTest { private static final Logger LOG = LoggerFactory.getLogger(AdminApiTest.class); - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/certificate/server.crt"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/certificate/server.key"; - private MockedPulsarService mockPulsarSetup; private PulsarService otherPulsar; @@ -185,8 +185,8 @@ private void applyDefaultConfig() { conf.setLoadBalancerEnabled(true); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); 
conf.setMessageExpiryCheckIntervalInMinutes(1); conf.setSubscriptionExpiryCheckIntervalInMinutes(1); conf.setBrokerDeleteInactiveTopicsEnabled(false); @@ -203,7 +203,7 @@ private void setupConfigAndStart(java.util.function.Consumer nsMap = admin.brokers().getOwnedNamespaces("test", list.get(0)); // since sla-monitor ns is not created nsMap.size() == 1 (for HeartBeat Namespace) @@ -533,7 +537,7 @@ public void brokers() throws Exception { for (String ns : nsMap.keySet()) { NamespaceOwnershipStatus nsStatus = nsMap.get(ns); if (ns.equals( - NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()) + NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfiguration()) + "/0x00000000_0xffffffff")) { assertEquals(nsStatus.broker_assignment, BrokerAssignment.shared); assertFalse(nsStatus.is_controlled); @@ -541,10 +545,7 @@ public void brokers() throws Exception { } } - String[] parts = list.get(0).split(":"); - Assert.assertEquals(parts.length, 2); - Map nsMap2 = adminTls.brokers().getOwnedNamespaces("test", - String.format("%s:%d", parts[0], pulsar.getListenPortHTTPS().get())); + Map nsMap2 = adminTls.brokers().getOwnedNamespaces("test", list.get(0)); Assert.assertEquals(nsMap2.size(), 2); deleteNamespaceWithRetry("prop-xyz/ns1", false); @@ -699,6 +700,10 @@ public void testInvalidDynamicConfigContentInMetadata() throws Exception { Awaitility.await().until(() -> pulsar.getConfiguration().getBrokerShutdownTimeoutMs() == newValue); // verify value is updated assertEquals(pulsar.getConfiguration().getBrokerShutdownTimeoutMs(), newValue); + // reset config + pulsar.getConfiguration().setBrokerShutdownTimeoutMs(0L); + // restart broker + restartBroker(); } /** @@ -797,6 +802,8 @@ public void namespaces() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), Set.of("test", "usw"))); assertEquals(admin.namespaces().getPolicies("prop-xyz/ns1").bundles, PoliciesUtil.defaultBundle()); @@ -929,8 +936,7 @@ public void persistentTopics(String topicName) throws Exception { assertEquals(topicStats.getSubscriptions().get(subName).getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); assertEquals(topicStats.getPublishers().size(), 0); - assertEquals(topicStats.getOwnerBroker(), - pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); + assertEquals(topicStats.getOwnerBroker(), pulsar.getBrokerId()); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(persistentTopicName, false); assertEquals(internalStats.cursors.keySet(), Set.of(Codec.encode(subName))); @@ -996,6 +1002,52 @@ public void persistentTopics(String topicName) throws Exception { assertEquals(admin.topics().getList("prop-xyz/ns1"), new ArrayList<>()); } + @Test(dataProvider = "topicName") + public void testSkipHoleMessages(String topicName) throws Exception { + final String subName = topicName; + assertEquals(admin.topics().getList("prop-xyz/ns1"), new ArrayList<>()); + + final String persistentTopicName = "persistent://prop-xyz/ns1/" + topicName; + // Force to create a topic + publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/" + topicName, 0); + assertEquals(admin.topics().getList("prop-xyz/ns1"), + List.of("persistent://prop-xyz/ns1/" + 
topicName)); + + // create consumer and subscription + @Cleanup + PulsarClient client = PulsarClient.builder() + .serviceUrl(pulsar.getWebServiceAddress()) + .statsInterval(0, TimeUnit.SECONDS) + .build(); + AtomicInteger total = new AtomicInteger(); + Consumer consumer = client.newConsumer().topic(persistentTopicName) + .messageListener(new MessageListener() { + @SneakyThrows + @Override + public void received(Consumer consumer, Message msg) { + if (total.get() %2 !=0){ + // artificially created 50 hollow messages + consumer.acknowledge(msg); + } + total.incrementAndGet(); + } + }) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Exclusive).subscribe(); + + assertEquals(admin.topics().getSubscriptions(persistentTopicName), List.of(subName)); + + publishMessagesOnPersistentTopic("persistent://prop-xyz/ns1/" + topicName, 100); + TimeUnit.SECONDS.sleep(2); + TopicStats topicStats = admin.topics().getStats(persistentTopicName); + long msgBacklog = topicStats.getSubscriptions().get(subName).getMsgBacklog(); + log.info("back={}",msgBacklog); + int skipNumber = 20; + admin.topics().skipMessages(persistentTopicName, subName, skipNumber); + topicStats = admin.topics().getStats(persistentTopicName); + assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), msgBacklog - skipNumber); + } + @Test(dataProvider = "topicNamesForAllTypes") public void partitionedTopics(String topicType, String topicName) throws Exception { final String namespace = "prop-xyz/ns1"; @@ -1242,7 +1294,7 @@ public void testGetStats() throws Exception { TopicStats topicStats = admin.topics().getStats(topic, false, false, true); assertEquals(topicStats.getEarliestMsgPublishTimeInBacklogs(), 0); - assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), 0); + assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), -1); assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), -1); // publish several messages @@ -1262,10 +1314,28 @@ public void testGetStats() throws Exception { topicStats = admin.topics().getStats(topic, false, true, true); assertEquals(topicStats.getEarliestMsgPublishTimeInBacklogs(), 0); - assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), 0); + assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), -1); assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 0); } + @Test + public void testGetPartitionedStatsContainSubscriptionType() throws Exception { + final String topic = "persistent://prop-xyz/ns1/my-topic" + UUID.randomUUID(); + final int numPartitions = 4; + admin.topics().createPartitionedTopic(topic, numPartitions); + + // create consumer and subscription + final String subName = "my-sub"; + @Cleanup Consumer exclusiveConsumer = pulsarClient.newConsumer().topic(topic) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Exclusive) + .subscribe(); + + TopicStats topicStats = admin.topics().getPartitionedStats(topic, false); + assertEquals(topicStats.getSubscriptions().size(), 1); + assertEquals(topicStats.getSubscriptions().get(subName).getType(), SubscriptionType.Exclusive.toString()); + } + @Test public void testGetPartitionedStatsInternal() throws Exception { @@ -3040,6 +3110,9 @@ public void testTopicBundleRangeLookup() throws PulsarAdminException, PulsarServ TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); 
admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), + tenantInfo.getAllowedClusters())); admin.namespaces().createNamespace("prop-xyz/getBundleNs", 100); assertEquals(admin.namespaces().getPolicies("prop-xyz/getBundleNs").bundles.getNumBundles(), 100); @@ -3233,6 +3306,9 @@ public void testCreateAndDeleteNamespaceWithBundles() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), + tenantInfo.getAllowedClusters())); String ns = BrokerTestUtil.newUniqueName("prop-xyz/ns"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTlsAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTlsAuthTest.java index bd23e0d7e4e85..2b7b9101a81f2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTlsAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTlsAuthTest.java @@ -65,19 +65,15 @@ @Test(groups = "broker-admin") public class AdminApiTlsAuthTest extends MockedPulsarServiceBaseTest { - private static String getTLSFile(String name) { - return String.format("./src/test/resources/authentication/tls-http/%s.pem", name); - } - @BeforeMethod @Override public void setup() throws Exception { conf.setLoadBalancerEnabled(true); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(getTLSFile("broker.cert")); - conf.setTlsKeyFilePath(getTLSFile("broker.key-pk8")); - conf.setTlsTrustCertsFilePath(getTLSFile("ca.cert")); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setAuthenticationEnabled(true); conf.setAuthenticationProviders( Set.of("org.apache.pulsar.broker.authentication.AuthenticationProviderTls")); @@ -87,8 +83,8 @@ public void setup() throws Exception { conf.setBrokerClientAuthenticationPlugin("org.apache.pulsar.client.impl.auth.AuthenticationTls"); conf.setBrokerClientAuthenticationParameters( - String.format("tlsCertFile:%s,tlsKeyFile:%s", getTLSFile("admin.cert"), getTLSFile("admin.key-pk8"))); - conf.setBrokerClientTrustCertsFilePath(getTLSFile("ca.cert")); + String.format("tlsCertFile:%s,tlsKeyFile:%s", getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8"))); + conf.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setBrokerClientTlsEnabled(true); conf.setNumExecutorThreadPoolSize(5); @@ -115,11 +111,11 @@ WebTarget buildWebClient(String user) throws Exception { .register(JacksonConfigurator.class).register(JacksonFeature.class); X509Certificate trustCertificates[] = SecurityUtility.loadCertificatesFromPemFile( - getTLSFile("ca.cert")); + CA_CERT_FILE_PATH); SSLContext sslCtx = SecurityUtility.createSslContext( false, trustCertificates, - SecurityUtility.loadCertificatesFromPemFile(getTLSFile(user + ".cert")), - SecurityUtility.loadPrivateKeyFromPemFile(getTLSFile(user + ".key-pk8"))); + SecurityUtility.loadCertificatesFromPemFile(getTlsFileForClient(user + ".cert")), + SecurityUtility.loadPrivateKeyFromPemFile(getTlsFileForClient(user + ".key-pk8"))); 
clientBuilder.sslContext(sslCtx).hostnameVerifier(NoopHostnameVerifier.INSTANCE); Client client = clientBuilder.build(); @@ -133,8 +129,8 @@ PulsarAdmin buildAdminClient(String user) throws Exception { .serviceHttpUrl(brokerUrlTls.toString()) .authentication("org.apache.pulsar.client.impl.auth.AuthenticationTls", String.format("tlsCertFile:%s,tlsKeyFile:%s", - getTLSFile(user + ".cert"), getTLSFile(user + ".key-pk8"))) - .tlsTrustCertsFilePath(getTLSFile("ca.cert")).build(); + getTlsFileForClient(user + ".cert"), getTlsFileForClient(user + ".key-pk8"))) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); } PulsarClient buildClient(String user) throws Exception { @@ -143,8 +139,8 @@ PulsarClient buildClient(String user) throws Exception { .enableTlsHostnameVerification(false) .authentication("org.apache.pulsar.client.impl.auth.AuthenticationTls", String.format("tlsCertFile:%s,tlsKeyFile:%s", - getTLSFile(user + ".cert"), getTLSFile(user + ".key-pk8"))) - .tlsTrustCertsFilePath(getTLSFile("ca.cert")).build(); + getTlsFileForClient(user + ".cert"), getTlsFileForClient(user + ".key-pk8"))) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); } @Test @@ -471,11 +467,11 @@ public void testDeleteNamespace() throws Exception { public void testCertRefreshForPulsarAdmin() throws Exception { String adminUser = "admin"; String user2 = "user1"; - File keyFile = new File(getTLSFile("temp" + ".key-pk8")); + File keyFile = File.createTempFile("temp", ".key-pk8"); Path keyFilePath = Paths.get(keyFile.getAbsolutePath()); int autoCertRefreshTimeSec = 1; try { - Files.copy(Paths.get(getTLSFile(user2 + ".key-pk8")), keyFilePath, StandardCopyOption.REPLACE_EXISTING); + Files.copy(Paths.get(getTlsFileForClient(user2 + ".key-pk8")), keyFilePath, StandardCopyOption.REPLACE_EXISTING); PulsarAdmin admin = PulsarAdmin.builder() .allowTlsInsecureConnection(false) .enableTlsHostnameVerification(false) @@ -483,8 +479,8 @@ public void testCertRefreshForPulsarAdmin() throws Exception { .autoCertRefreshTime(autoCertRefreshTimeSec, TimeUnit.SECONDS) .authentication("org.apache.pulsar.client.impl.auth.AuthenticationTls", String.format("tlsCertFile:%s,tlsKeyFile:%s", - getTLSFile(adminUser + ".cert"), keyFile)) - .tlsTrustCertsFilePath(getTLSFile("ca.cert")).build(); + getTlsFileForClient(adminUser + ".cert"), keyFile)) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); // try to call admin-api which should fail due to incorrect key-cert try { admin.tenants().createTenant("tenantX", @@ -496,7 +492,7 @@ public void testCertRefreshForPulsarAdmin() throws Exception { // replace correct key file Files.delete(keyFile.toPath()); Thread.sleep(2 * autoCertRefreshTimeSec * 1000); - Files.copy(Paths.get(getTLSFile(adminUser + ".key-pk8")), keyFilePath); + Files.copy(Paths.get(getTlsFileForClient(adminUser + ".key-pk8")), keyFilePath); MutableBoolean success = new MutableBoolean(false); retryStrategically((test) -> { try { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java index 046f2b4cf14c6..25ef8d8aee1fb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java @@ -302,7 +302,7 @@ public void clusters() throws Exception { NamespaceIsolationDataImpl policyData = NamespaceIsolationDataImpl.builder() .namespaces(Collections.singletonList("dummy/colo/ns")) - .primary(Collections.singletonList("localhost" + ":" + 
pulsar.getListenPortHTTP())) + .primary(Collections.singletonList(pulsar.getAdvertisedAddress())) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) .parameters(parameters1) @@ -722,11 +722,12 @@ public void brokers() throws Exception { assertTrue(res instanceof Set); Set activeBrokers = (Set) res; assertEquals(activeBrokers.size(), 1); - assertEquals(activeBrokers, Set.of(pulsar.getAdvertisedAddress() + ":" + pulsar.getListenPortHTTP().get())); + assertEquals(activeBrokers, Set.of(pulsar.getBrokerId())); Object leaderBrokerRes = asyncRequests(ctx -> brokers.getLeaderBroker(ctx)); assertTrue(leaderBrokerRes instanceof BrokerInfo); BrokerInfo leaderBroker = (BrokerInfo)leaderBrokerRes; - assertEquals(leaderBroker.getServiceUrl(), pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getServiceUrl).get()); + assertEquals(leaderBroker.getBrokerId(), + pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getBrokerId).get()); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java index 64b2a58ab86e8..f8aa3dc355d92 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java @@ -154,17 +154,17 @@ private void verifyBacklog(String topic, String subscription, int numEntries, in AnalyzeSubscriptionBacklogResult analyzeSubscriptionBacklogResult = admin.topics().analyzeSubscriptionBacklog(topic, subscription, Optional.empty()); - assertEquals(numEntries, analyzeSubscriptionBacklogResult.getEntries()); - assertEquals(numEntries, analyzeSubscriptionBacklogResult.getFilterAcceptedEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRejectedEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledEntries()); + assertEquals(analyzeSubscriptionBacklogResult.getEntries(), numEntries); + assertEquals(analyzeSubscriptionBacklogResult.getFilterAcceptedEntries(), numEntries); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRejectedEntries(), 0); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledEntries(), 0); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledEntries(), 0); - assertEquals(numMessages, analyzeSubscriptionBacklogResult.getMessages()); - assertEquals(numMessages, analyzeSubscriptionBacklogResult.getFilterAcceptedMessages()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRejectedMessages()); + assertEquals(analyzeSubscriptionBacklogResult.getMessages(), numMessages); + assertEquals(analyzeSubscriptionBacklogResult.getFilterAcceptedMessages(), numMessages); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRejectedMessages(), 0); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledMessages()); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledMessages(), 0); assertFalse(analyzeSubscriptionBacklogResult.isAborted()); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BrokerAdminClientTlsAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BrokerAdminClientTlsAuthTest.java index 54164c3d40ef5..0e4f1bccc814d 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BrokerAdminClientTlsAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BrokerAdminClientTlsAuthTest.java @@ -48,10 +48,6 @@ public void beforeMethod(Method m) throws Exception { methodName = m.getName(); } - private static String getTLSFile(String name) { - return String.format("./src/test/resources/authentication/tls-http/%s.pem", name); - } - @BeforeMethod @Override public void setup() throws Exception { @@ -63,19 +59,19 @@ public void setup() throws Exception { private void buildConf(ServiceConfiguration conf) { conf.setLoadBalancerEnabled(true); - conf.setTlsCertificateFilePath(getTLSFile("broker.cert")); - conf.setTlsKeyFilePath(getTLSFile("broker.key-pk8")); - conf.setTlsTrustCertsFilePath(getTLSFile("ca.cert")); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setAuthenticationEnabled(true); - conf.setSuperUserRoles(Set.of("superproxy", "broker.pulsar.apache.org")); + conf.setSuperUserRoles(Set.of("superproxy", "broker-localhost-SAN")); conf.setAuthenticationProviders( Set.of("org.apache.pulsar.broker.authentication.AuthenticationProviderTls")); conf.setAuthorizationEnabled(true); conf.setBrokerClientTlsEnabled(true); - String str = String.format("tlsCertFile:%s,tlsKeyFile:%s", getTLSFile("broker.cert"), getTLSFile("broker.key-pk8")); + String str = String.format("tlsCertFile:%s,tlsKeyFile:%s", BROKER_CERT_FILE_PATH, BROKER_KEY_FILE_PATH); conf.setBrokerClientAuthenticationParameters(str); conf.setBrokerClientAuthenticationPlugin("org.apache.pulsar.client.impl.auth.AuthenticationTls"); - conf.setBrokerClientTrustCertsFilePath(getTLSFile("ca.cert")); + conf.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setTlsAllowInsecureConnection(true); conf.setNumExecutorThreadPoolSize(5); } @@ -93,8 +89,8 @@ PulsarAdmin buildAdminClient(String user) throws Exception { .serviceHttpUrl(brokerUrlTls.toString()) .authentication("org.apache.pulsar.client.impl.auth.AuthenticationTls", String.format("tlsCertFile:%s,tlsKeyFile:%s", - getTLSFile(user + ".cert"), getTLSFile(user + ".key-pk8"))) - .tlsTrustCertsFilePath(getTLSFile("ca.cert")).build(); + getTlsFileForClient(user + ".cert"), getTlsFileForClient(user + ".key-pk8"))) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); } /** diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java index 5644be406a7ee..53b170d52e6d3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java @@ -32,6 +32,7 @@ import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.lang.reflect.Field; @@ -1294,6 +1295,14 @@ public void testForceDeleteNamespace() throws Exception { pulsar.getConfiguration().setForceDeleteNamespaceAllowed(false); } + @Test + public void testSetNamespaceReplicationCluters() throws Exception { + String namespace = BrokerTestUtil.newUniqueName(this.testTenant + "/namespace"); + admin.namespaces().createNamespace(namespace, 100); + 
assertThrows(PulsarAdminException.PreconditionFailedException.class, + () -> admin.namespaces().setNamespaceReplicationClusters(namespace, Set.of())); + } + @Test public void testForceDeleteNamespaceNotAllowed() throws Exception { assertFalse(pulsar.getConfiguration().isForceDeleteNamespaceAllowed()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java index 6949fe931ca5a..80a493188d112 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java @@ -126,6 +126,7 @@ public void initPersistentTopics() throws Exception { @Override @BeforeMethod protected void setup() throws Exception { + conf.setTopicLevelPoliciesEnabled(false); super.internalSetup(); persistentTopics = spy(PersistentTopics.class); persistentTopics.setServletContext(new MockServletContext()); @@ -1144,12 +1145,21 @@ public void testExamineMessage() throws Exception { // Check examine message not allowed on partitioned topic. try { admin.topics().examineMessage(topicName, "earliest", 1); + Assert.fail("fail to check examine message not allowed on partitioned topic"); } catch (PulsarAdminException e) { Assert.assertEquals(e.getMessage(), "Examine messages on a partitioned topic is not allowed, please try examine message on specific " + "topic partition"); } + try { + admin.topics().examineMessage(topicName + "-partition-0", "earliest", 1); + Assert.fail(); + } catch (PulsarAdminException e) { + Assert.assertEquals(e.getMessage(), + "Could not examine messages due to the total message is zero"); + } + producer.send("message1"); Assert.assertEquals( new String(admin.topics().examineMessage(topicName + "-partition-0", "earliest", 1).getData()), @@ -1360,21 +1370,12 @@ public void testGetMessageById() throws Exception { Message message2 = admin.topics().getMessageById(topicName2, id2.getLedgerId(), id2.getEntryId()); Assert.assertEquals(message2.getData(), data2.getBytes()); - Message message3 = null; - try { - message3 = admin.topics().getMessageById(topicName2, id1.getLedgerId(), id1.getEntryId()); - Assert.fail(); - } catch (Exception e) { - Assert.assertNull(message3); - } - - Message message4 = null; - try { - message4 = admin.topics().getMessageById(topicName1, id2.getLedgerId(), id2.getEntryId()); - Assert.fail(); - } catch (Exception e) { - Assert.assertNull(message4); - } + Assert.expectThrows(PulsarAdminException.NotFoundException.class, () -> { + admin.topics().getMessageById(topicName2, id1.getLedgerId(), id1.getEntryId()); + }); + Assert.expectThrows(PulsarAdminException.NotFoundException.class, () -> { + admin.topics().getMessageById(topicName1, id2.getLedgerId(), id2.getEntryId()); + }); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java index d447787d1b7cb..a75ae78cef393 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java @@ -27,7 +27,10 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; 
import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -40,6 +43,7 @@ import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; import org.apache.pulsar.common.policies.data.TopicType; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -55,6 +59,7 @@ protected void setup() throws Exception { conf.setAllowAutoTopicCreationType(TopicType.PARTITIONED); conf.setAllowAutoTopicCreation(true); conf.setDefaultNumPartitions(3); + conf.setForceDeleteNamespaceAllowed(true); super.internalSetup(); super.producerBaseSetup(); } @@ -135,6 +140,8 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() new InetSocketAddress(pulsar.getAdvertisedAddress(), pulsar.getBrokerListenPort().get()); return CompletableFuture.completedFuture(Pair.of(brokerAddress, brokerAddress)); }); + final String topicPoliciesServiceInitException + = "Topic creation encountered an exception by initialize topic policies service"; // Creating a producer and creating a Consumer may trigger automatic topic // creation, let's try to create a Producer and a Consumer @@ -142,20 +149,24 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() .sendTimeout(1, TimeUnit.SECONDS) .topic(topic) .create()) { - } catch (PulsarClientException.LookupException expected) { - String msg = "Namespace bundle for topic (%s) not served by this instance"; + } catch (PulsarClientException.TopicDoesNotExistException expected) { + // Since the "policies.deleted" is "true", the value of "isAllowAutoTopicCreationAsync" will be false, + // so the "TopicDoesNotExistException" is expected. log.info("Expected error", expected); - assertTrue(expected.getMessage().contains(String.format(msg, topic))); + assertTrue(expected.getMessage().contains(topic) + || expected.getMessage().contains(topicPoliciesServiceInitException)); } try (Consumer ignored = pulsarClient.newConsumer() .topic(topic) .subscriptionName("test") .subscribe()) { - } catch (PulsarClientException.LookupException expected) { - String msg = "Namespace bundle for topic (%s) not served by this instance"; + } catch (PulsarClientException.TopicDoesNotExistException expected) { + // Since the "policies.deleted" is "true", the value of "isAllowAutoTopicCreationAsync" will be false, + // so the "TopicDoesNotExistException" is expected. 
log.info("Expected error", expected); - assertTrue(expected.getMessage().contains(String.format(msg, topic))); + assertTrue(expected.getMessage().contains(topic) + || expected.getMessage().contains(topicPoliciesServiceInitException)); } @@ -182,4 +193,56 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() } } + + @Test + public void testClientWithAutoCreationGotNotFoundException() throws PulsarAdminException, PulsarClientException { + final String namespace = "public/test_1"; + final String topicName = "persistent://public/test_1/test_auto_creation_got_not_found" + + System.currentTimeMillis(); + final int retryTimes = 30; + admin.namespaces().createNamespace(namespace); + admin.namespaces().setAutoTopicCreation(namespace, AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType("non-partitioned") + .build()); + + @Cleanup("shutdown") + final ExecutorService executor1 = Executors.newSingleThreadExecutor(); + + @Cleanup("shutdown") + final ExecutorService executor2 = Executors.newSingleThreadExecutor(); + + for (int i = 0; i < retryTimes; i++) { + final CompletableFuture adminListSub = CompletableFuture.runAsync(() -> { + try { + admin.topics().getSubscriptions(topicName); + } catch (PulsarAdminException e) { + throw new RuntimeException(e); + } + }, executor1); + + final CompletableFuture> consumerSub = CompletableFuture.supplyAsync(() -> { + try { + return pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub-1") + .subscribe(); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + }, executor2); + + try { + adminListSub.join(); + } catch (Throwable ex) { + // we don't care the exception. + } + + consumerSub.join().close(); + admin.topics().delete(topicName, true); + } + + admin.namespaces().deleteNamespace(namespace, true); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java new file mode 100644 index 0000000000000..9cf4c111dddbc --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import static org.awaitility.Awaitility.await; +import io.jsonwebtoken.Jwts; +import java.util.Set; +import java.util.UUID; +import lombok.Cleanup; +import lombok.SneakyThrows; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.AuthAction; +import org.apache.pulsar.common.policies.data.RetentionPolicies; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.security.MockedPulsarStandalone; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + + +public final class TopicPoliciesAuthZTest extends MockedPulsarStandalone { + + private PulsarAdmin superUserAdmin; + + private PulsarAdmin tenantManagerAdmin; + + private static final String TENANT_ADMIN_SUBJECT = UUID.randomUUID().toString(); + private static final String TENANT_ADMIN_TOKEN = Jwts.builder() + .claim("sub", TENANT_ADMIN_SUBJECT).signWith(SECRET_KEY).compact(); + + @SneakyThrows + @BeforeClass + public void before() { + configureTokenAuthentication(); + configureDefaultAuthorization(); + start(); + this.superUserAdmin =PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(SUPER_USER_TOKEN)) + .build(); + final TenantInfo tenantInfo = superUserAdmin.tenants().getTenantInfo("public"); + tenantInfo.getAdminRoles().add(TENANT_ADMIN_SUBJECT); + superUserAdmin.tenants().updateTenant("public", tenantInfo); + this.tenantManagerAdmin = PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(TENANT_ADMIN_TOKEN)) + .build(); + } + + + @SneakyThrows + @AfterClass + public void after() { + close(); + } + + + @SneakyThrows + @Test + public void testRetention() { + final String random = UUID.randomUUID().toString(); + final String topic = "persistent://public/default/" + random; + final String subject = UUID.randomUUID().toString(); + final String token = Jwts.builder() + .claim("sub", subject).signWith(SECRET_KEY).compact(); + superUserAdmin.topics().createNonPartitionedTopic(topic); + + @Cleanup + final PulsarAdmin subAdmin = PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(token)) + .build(); + final RetentionPolicies definedRetentionPolicy = new RetentionPolicies(1, 1); + // test superuser + superUserAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + + // because the topic policies is eventual consistency, we should wait here + await().untilAsserted(() -> { + final RetentionPolicies receivedRetentionPolicy = superUserAdmin.topicPolicies().getRetention(topic); + Assert.assertEquals(receivedRetentionPolicy, definedRetentionPolicy); + }); + superUserAdmin.topicPolicies().removeRetention(topic); + + await().untilAsserted(() -> { + final RetentionPolicies retention = superUserAdmin.topicPolicies().getRetention(topic); + Assert.assertNull(retention); + }); + + // test tenant manager + + tenantManagerAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + await().untilAsserted(() -> { + final RetentionPolicies receivedRetentionPolicy = tenantManagerAdmin.topicPolicies().getRetention(topic); + Assert.assertEquals(receivedRetentionPolicy, definedRetentionPolicy); + 
}); + tenantManagerAdmin.topicPolicies().removeRetention(topic); + await().untilAsserted(() -> { + final RetentionPolicies retention = tenantManagerAdmin.topicPolicies().getRetention(topic); + Assert.assertNull(retention); + }); + + // test nobody + + try { + subAdmin.topicPolicies().getRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + + subAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + subAdmin.topicPolicies().removeRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + // test sub user with permissions + for (AuthAction action : AuthAction.values()) { + superUserAdmin.namespaces().grantPermissionOnNamespace("public/default", + subject, Set.of(action)); + try { + subAdmin.topicPolicies().getRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + + subAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + subAdmin.topicPolicies().removeRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + superUserAdmin.namespaces().revokePermissionsOnNamespace("public/default", subject); + } + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java index a84e955cc9d81..c5111b47d2082 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.lang.reflect.Field; @@ -41,6 +42,8 @@ import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.ConfigHelper; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.namespace.NamespaceService; @@ -49,6 +52,7 @@ import org.apache.pulsar.broker.service.SystemTopicBasedTopicPoliciesService; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.SubscribeRateLimiter; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -82,8 +86,11 @@ 
import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -133,6 +140,22 @@ public void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void updatePropertiesForAutoCreatedTopicTest() throws Exception { + TopicName topicName = TopicName.get( + TopicDomain.persistent.value(), + NamespaceName.get(myNamespace), + "test-" + UUID.randomUUID() + ); + String testTopic = topicName.toString(); + @Cleanup + Producer producer = pulsarClient.newProducer().topic(testTopic).create(); + HashMap properties = new HashMap<>(); + properties.put("backlogQuotaType", "message_age"); + admin.topics().updateProperties(testTopic, properties); + admin.topics().delete(topicName.toString(), true); + } + @Test public void testTopicPolicyInitialValueWithNamespaceAlreadyLoaded() throws Exception{ TopicName topicName = TopicName.get( @@ -160,11 +183,11 @@ public void testTopicPolicyInitialValueWithNamespaceAlreadyLoaded() throws Excep //load the nameserver, but topic is not init. log.info("lookup:{}",admin.lookups().lookupTopic(topic)); - assertTrue(pulsar.getBrokerService().isTopicNsOwnedByBroker(topicName)); + assertTrue(pulsar.getBrokerService().isTopicNsOwnedByBrokerAsync(topicName).join()); assertFalse(pulsar.getBrokerService().getTopics().containsKey(topic)); //make sure namespace policy reader is fully started. Awaitility.await().untilAsserted(()-> { - assertTrue(policyService.getPoliciesCacheInit(topicName.getNamespaceObject())); + assertTrue(policyService.getPoliciesCacheInit(topicName.getNamespaceObject()).isDone()); }); //load the topic. @@ -2974,6 +2997,10 @@ public void testReplicatorClusterApi() throws Exception { admin.topics().removeReplicationClusters(topic); Awaitility.await().untilAsserted(() -> assertNull(admin.topics().getReplicationClusters(topic, false))); + + assertThrows(PulsarAdminException.PreconditionFailedException.class, () -> admin.topics().setReplicationClusters(topic, List.of())); + assertThrows(PulsarAdminException.PreconditionFailedException.class, () -> admin.topics().setReplicationClusters(topic, null)); + } @Test @@ -2999,6 +3026,7 @@ public void testLoopCreateAndDeleteTopicPolicies() throws Exception { }); } } + @Test public void testGlobalTopicPolicies() throws Exception { final String topic = testTopic + UUID.randomUUID(); @@ -3139,4 +3167,49 @@ public void testProduceChangesWithEncryptionRequired() throws Exception { }); } + @Test + public void testUpdateRetentionWithPartialFailure() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://" + myNamespace + "/tp"); + admin.topics().createNonPartitionedTopic(tpName); + + // Load topic up. + admin.topics().getInternalStats(tpName); + + // Inject an error that makes dispatch rate update fail. 
+        PersistentTopic persistentTopic =
+                (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get();
+        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions =
+                WhiteboxImpl.getInternalState(persistentTopic, "subscriptions");
+        PersistentSubscription mockedSubscription = Mockito.mock(PersistentSubscription.class);
+        Mockito.when(mockedSubscription.getDispatcher()).thenThrow(new RuntimeException("Mocked error: getDispatcher"));
+        subscriptions.put("mockedSubscription", mockedSubscription);
+
+        // Update namespace-level retention policies.
+        RetentionPolicies retentionPolicies1 = new RetentionPolicies(1, 1);
+        admin.namespaces().setRetentionAsync(myNamespace, retentionPolicies1);
+
+        // Verify: the retention update succeeds even though another component's update throws an exception.
+        Awaitility.await().untilAsserted(() -> {
+            ManagedLedgerImpl ML = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
+            assertEquals(ML.getConfig().getRetentionSizeInMB(), 1);
+            assertEquals(ML.getConfig().getRetentionTimeMillis(), 1 * 60 * 1000);
+        });
+
+        // Update topic-level retention policies.
+        RetentionPolicies retentionPolicies2 = new RetentionPolicies(2, 2);
+        admin.topics().setRetentionAsync(tpName, retentionPolicies2);
+
+        // Verify: the retention update succeeds even though another component's update throws an exception.
+        Awaitility.await().untilAsserted(() -> {
+            ManagedLedgerImpl ML = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
+            assertEquals(ML.getConfig().getRetentionSizeInMB(), 2);
+            assertEquals(ML.getConfig().getRetentionTimeMillis(), 2 * 60 * 1000);
+        });
+
+        // Cleanup.
+        subscriptions.clear();
+        admin.namespaces().removeRetention(myNamespace);
+        admin.topics().delete(tpName, false);
+    }
+
 }
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java
new file mode 100644
index 0000000000000..672fc2c95f890
--- /dev/null
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.apache.pulsar.broker.admin; + +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.common.policies.data.RetentionPolicies; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeUnit; + +@Slf4j +@Test(groups = "broker-admin") +public class TopicPoliciesWithBrokerRestartTest extends MockedPulsarServiceBaseTest { + + @Override + @BeforeClass(alwaysRun = true) + protected void setup() throws Exception { + super.internalSetup(); + setupDefaultTenantAndNamespace(); + } + + @Override + @AfterClass(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + + @Test + public void testRetentionWithBrokerRestart() throws Exception { + final int messages = 1_000; + final int topicNum = 500; + // (1) Init topic + admin.namespaces().createNamespace("public/retention"); + final String topicName = "persistent://public/retention/retention_with_broker_restart"; + admin.topics().createNonPartitionedTopic(topicName); + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.topics().createNonPartitionedTopic(shadowTopicNames); + } + // (2) Set retention + final RetentionPolicies retentionPolicies = new RetentionPolicies(20, 20); + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.topicPolicies().setRetention(shadowTopicNames, retentionPolicies); + } + admin.topicPolicies().setRetention(topicName, retentionPolicies); + // (3) Send messages + @Cleanup + final Producer publisher = pulsarClient.newProducer() + .topic(topicName) + .create(); + for (int i = 0; i < messages; i++) { + publisher.send((i + "").getBytes(StandardCharsets.UTF_8)); + } + // (4) Check configuration + Awaitility.await().untilAsserted(() -> { + final PersistentTopic persistentTopic1 = (PersistentTopic) + pulsar.getBrokerService().getTopic(topicName, true).join().get(); + final ManagedLedgerImpl managedLedger1 = (ManagedLedgerImpl) persistentTopic1.getManagedLedger(); + Assert.assertEquals(managedLedger1.getConfig().getRetentionSizeInMB(), 20); + Assert.assertEquals(managedLedger1.getConfig().getRetentionTimeMillis(), + TimeUnit.MINUTES.toMillis(20)); + }); + // (5) Restart broker + restartBroker(); + // (6) Check configuration again + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.lookups().lookupTopic(shadowTopicNames); + final PersistentTopic persistentTopicTmp = (PersistentTopic) + pulsar.getBrokerService().getTopic(shadowTopicNames, true).join().get(); + final ManagedLedgerImpl managedLedgerTemp = (ManagedLedgerImpl) persistentTopicTmp.getManagedLedger(); + Assert.assertEquals(managedLedgerTemp.getConfig().getRetentionSizeInMB(), 20); + Assert.assertEquals(managedLedgerTemp.getConfig().getRetentionTimeMillis(), + TimeUnit.MINUTES.toMillis(20)); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java index efd8b66d754ac..234af7afa8d09 
100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java @@ -84,6 +84,8 @@ protected void setup() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderToken.class.getName()); conf.setAuthenticationProviders(providers); + conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters("token:" + ADMIN_TOKEN); super.internalSetup(); PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null ? brokerUrl.toString() : brokerUrlTls.toString()) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java new file mode 100644 index 0000000000000..88bf2f8f42108 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import lombok.Cleanup; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; +import org.apache.pulsar.broker.rest.Topics; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.TopicDomain; +import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.websocket.data.ProducerMessage; +import org.apache.pulsar.websocket.data.ProducerMessages; +import org.mockito.ArgumentCaptor; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import javax.ws.rs.container.AsyncResponse; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.net.URI; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +public class TopicsWithoutTlsTest extends MockedPulsarServiceBaseTest { + + private Topics topics; + private final String testLocalCluster = "test"; + private final String testTenant = "my-tenant"; + private final String testNamespace = "my-namespace"; + private final String testTopicName = "my-topic"; + + @Override + @BeforeMethod + protected void setup() throws Exception { + super.internalSetup(); + topics = spy(new Topics()); + topics.setPulsar(pulsar); + doReturn(TopicDomain.persistent.value()).when(topics).domain(); + doReturn("test-app").when(topics).clientAppId(); + doReturn(mock(AuthenticationDataHttps.class)).when(topics).clientAuthData(); + admin.clusters().createCluster(testLocalCluster, new ClusterDataImpl()); + admin.tenants().createTenant(testTenant, new TenantInfoImpl(Set.of("role1", "role2"), Set.of(testLocalCluster))); + admin.namespaces().createNamespace(testTenant + "/" + testNamespace, + Set.of(testLocalCluster)); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setBrokerServicePortTls(Optional.empty()); + this.conf.setWebServicePortTls(Optional.empty()); + } + + @Override + @AfterMethod + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testLookUpWithRedirect() throws Exception { + String topicName = "persistent://" + testTenant + "/" + testNamespace + "/" + testTopicName; + URI requestPath = URI.create(pulsar.getWebServiceAddress() + "/topics/my-tenant/my-namespace/my-topic"); + //create topic on one broker + admin.topics().createNonPartitionedTopic(topicName); + conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePort(Optional.of(0)); + @Cleanup + PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(conf); + PulsarService pulsar2 = pulsarTestContext2.getPulsarService(); + doReturn(false).when(topics).isRequestHttps(); + UriInfo uriInfo = mock(UriInfo.class); + doReturn(requestPath).when(uriInfo).getRequestUri(); 
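+        // The mocked UriInfo makes the REST handler believe the request arrived at the first
+        // broker's address. The produce call is then executed against pulsar2, which does not own
+        // the topic, so the handler is expected to reply with a TEMPORARY_REDIRECT that points
+        // back to the owning broker (asserted below).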
+ FieldUtils.writeField(topics, "uri", uriInfo, true); + //do produce on another broker + topics.setPulsar(pulsar2); + AsyncResponse asyncResponse = mock(AsyncResponse.class); + ProducerMessages producerMessages = new ProducerMessages(); + producerMessages.setValueSchema(ObjectMapperFactory.getMapper().getObjectMapper(). + writeValueAsString(Schema.INT64.getSchemaInfo())); + String message = "[]"; + producerMessages.setMessages(createMessages(message)); + topics.produceOnPersistentTopic(asyncResponse, testTenant, testNamespace, testTopicName, false, producerMessages); + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Response.class); + verify(asyncResponse, timeout(5000).times(1)).resume(responseCaptor.capture()); + // Verify got redirect response + Assert.assertEquals(responseCaptor.getValue().getStatusInfo(), Response.Status.TEMPORARY_REDIRECT); + // Verify URI point to address of broker the topic was created on + Assert.assertEquals(responseCaptor.getValue().getLocation().toString(), requestPath.toString()); + } + + private static List createMessages(String message) throws JsonProcessingException { + return ObjectMapperFactory.getMapper().reader() + .forType(new TypeReference>() { + }).readValue(message); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java index b2052cdcbf0e0..be4f735e91a65 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java @@ -127,9 +127,6 @@ public class V1_AdminApiTest extends MockedPulsarServiceBaseTest { private static final Logger LOG = LoggerFactory.getLogger(V1_AdminApiTest.class); - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/certificate/server.crt"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/certificate/server.key"; - private MockedPulsarService mockPulsarSetup; private PulsarService otherPulsar; @@ -147,15 +144,15 @@ public void setup() throws Exception { conf.setLoadBalancerEnabled(true); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); super.internalSetup(); bundleFactory = new NamespaceBundleFactory(pulsar, Hashing.crc32()); - adminTls = spy(PulsarAdmin.builder().tlsTrustCertsFilePath(TLS_SERVER_CERT_FILE_PATH) + adminTls = spy(PulsarAdmin.builder().tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .serviceHttpUrl(brokerUrlTls.toString()).build()); // create otherbroker to test redirect on calls that need @@ -452,7 +449,7 @@ public void brokers() throws Exception { for (String ns : nsMap.keySet()) { NamespaceOwnershipStatus nsStatus = nsMap.get(ns); if (ns.equals( - NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()) + NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfiguration()) + "/0x00000000_0xffffffff")) { assertEquals(nsStatus.broker_assignment, BrokerAssignment.shared); assertFalse(nsStatus.is_controlled); @@ -460,10 +457,7 @@ public void brokers() throws Exception { } } - String[] parts = list.get(0).split(":"); - Assert.assertEquals(parts.length, 2); - Map nsMap2 = 
adminTls.brokers().getOwnedNamespaces("use", - String.format("%s:%d", parts[0], pulsar.getListenPortHTTPS().get())); + Map nsMap2 = adminTls.brokers().getOwnedNamespaces("use", list.get(0)); Assert.assertEquals(nsMap2.size(), 2); admin.namespaces().deleteNamespace("prop-xyz/use/ns1"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java index 0e51470da75a5..94c955f85ebe2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java @@ -160,7 +160,7 @@ public void testGetTransactionInBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -170,7 +170,7 @@ public void testGetTransactionInBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).sendTimeout(0, TimeUnit.SECONDS).create(); @@ -205,7 +205,7 @@ public void testGetTransactionInPendingAckStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -216,7 +216,7 @@ public void testGetTransactionInPendingAckStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); @@ -331,7 +331,7 @@ public void testGetTransactionBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -341,7 +341,7 @@ public void testGetTransactionBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = 
(PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES) @@ -389,7 +389,7 @@ public void testGetPendingAckStats(String ackType) throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -399,7 +399,7 @@ public void testGetPendingAckStats(String ackType) throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); @@ -538,7 +538,7 @@ public void testGetPendingAckInternalStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -548,7 +548,7 @@ public void testGetPendingAckInternalStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java index 6ffcecbeb9f8b..942a42fa7aaa1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java @@ -60,6 +60,8 @@ public void setup() throws Exception { conf.setAuthorizationEnabled(true); conf.setAuthorizationAllowWildcardsMatching(true); conf.setSuperUserRoles(Sets.newHashSet("super")); + conf.setBrokerClientAuthenticationPlugin(MockAuthentication.class.getName()); + conf.setBrokerClientAuthenticationParameters("user:pass.pass"); internalSetup(); try (PulsarAdmin admin = PulsarAdmin.builder() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java index 0b1726617f71f..25ac59796b02c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java @@ -29,7 +29,10 @@ public class MockAuthentication implements Authentication { private static final Logger log = 
LoggerFactory.getLogger(MockAuthentication.class); - private final String user; + private String user; + + public MockAuthentication() { + } public MockAuthentication(String user) { this.user = user; @@ -67,6 +70,7 @@ public String getCommandData() { @Override public void configure(Map authParams) { + this.user = authParams.get("user"); } @Override diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockOIDCIdentityProvider.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockOIDCIdentityProvider.java new file mode 100644 index 0000000000000..5d29c443d2bde --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockOIDCIdentityProvider.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.auth; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.matching; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.common.FileSource; +import com.github.tomakehurst.wiremock.extension.Parameters; +import com.github.tomakehurst.wiremock.extension.ResponseTransformer; +import com.github.tomakehurst.wiremock.http.Request; +import com.github.tomakehurst.wiremock.http.Response; +import io.jsonwebtoken.SignatureAlgorithm; +import io.jsonwebtoken.impl.DefaultJwtBuilder; +import io.jsonwebtoken.security.Keys; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.security.KeyPair; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.util.Base64; +import java.util.Date; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Mock OIDC (and therefore OAuth2) server for testing. Note that the client_id is mapped to the token's subject claim. 
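+ *
+ * <p>The server exposes a discovery document at {@code /.well-known/openid-configuration} and a
+ * token endpoint at {@code /oauth/token} that issues RS256-signed JWTs for client_credentials
+ * requests. A rough usage sketch (how the issuer and public key are wired into the broker or
+ * client configuration depends on the concrete test):
+ * <pre>{@code
+ * MockOIDCIdentityProvider oidc = new MockOIDCIdentityProvider("client-secret", "my-audience", 10_000);
+ * // point the authentication configuration at oidc.getIssuer(), trust tokens signed with
+ * // oidc.getBase64EncodedPublicKey(), and call oidc.stop() when the test finishes
+ * }</pre>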
+ */ +public class MockOIDCIdentityProvider { + private final WireMockServer server; + private final PublicKey publicKey; + private final String audience; + public MockOIDCIdentityProvider(String clientSecret, String audience, long tokenTTLMillis) { + this.audience = audience; + KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256); + publicKey = keyPair.getPublic(); + server = new WireMockServer(wireMockConfig().port(0) + .extensions(new OAuth2Transformer(keyPair, tokenTTLMillis))); + server.start(); + + // Set up a correct openid-configuration that points to the next stub + server.stubFor( + get(urlEqualTo("/.well-known/openid-configuration")) + .willReturn(aResponse() + .withHeader("Content-Type", "application/json") + .withBody(""" + { + "issuer": "%s", + "token_endpoint": "%s/oauth/token" + } + """.replace("%s", server.baseUrl())))); + + // Only respond when the client sends the expected request body + server.stubFor(post(urlEqualTo("/oauth/token")) + .withRequestBody(matching(".*grant_type=client_credentials.*")) + .withRequestBody(matching(".*audience=" + URLEncoder.encode(audience, StandardCharsets.UTF_8) + ".*")) + .withRequestBody(matching(".*client_id=.*")) + .withRequestBody(matching(".*client_secret=" + clientSecret + "(&.*|$)")) + .willReturn(aResponse().withTransformers("o-auth-token-transformer").withStatus(200))); + } + + public void stop() { + server.stop(); + } + + public String getBase64EncodedPublicKey() { + return Base64.getEncoder().encodeToString(publicKey.getEncoded()); + } + + public String getIssuer() { + return server.baseUrl(); + } + + class OAuth2Transformer extends ResponseTransformer { + + private final PrivateKey privateKey; + private final long tokenTTL; + + private final Pattern clientIdToRolePattern = Pattern.compile("client_id=([A-Za-z0-9-]*)(&|$)"); + + OAuth2Transformer(KeyPair key, long tokenTTLMillis) { + this.privateKey = key.getPrivate(); + this.tokenTTL = tokenTTLMillis; + } + + @Override + public Response transform(Request request, Response response, FileSource files, Parameters parameters) { + Matcher m = clientIdToRolePattern.matcher(request.getBodyAsString()); + if (m.find()) { + String role = m.group(1); + return Response.Builder.like(response).but().body(""" + { + "access_token": "%s", + "expires_in": %d, + "token_type":"Bearer" + } + """.formatted(generateToken(role), + TimeUnit.MILLISECONDS.toSeconds(tokenTTL))).build(); + } else { + return Response.Builder.like(response).but().body("Invalid request").status(400).build(); + } + } + + @Override + public String getName() { + return "o-auth-token-transformer"; + } + + @Override + public boolean applyGlobally() { + return false; + } + + private String generateToken(String role) { + long now = System.currentTimeMillis(); + DefaultJwtBuilder defaultJwtBuilder = new DefaultJwtBuilder(); + defaultJwtBuilder.setHeaderParam("typ", "JWT"); + defaultJwtBuilder.setHeaderParam("alg", "RS256"); + defaultJwtBuilder.setIssuer(server.baseUrl()); + defaultJwtBuilder.setSubject(role); + defaultJwtBuilder.setAudience(audience); + defaultJwtBuilder.setIssuedAt(new Date(now)); + defaultJwtBuilder.setNotBefore(new Date(now)); + defaultJwtBuilder.setExpiration(new Date(now + tokenTTL)); + defaultJwtBuilder.signWith(privateKey); + return defaultJwtBuilder.compact(); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java index 3fe8c22b1c4de..7380912e4247a 
100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java @@ -18,9 +18,11 @@ */ package org.apache.pulsar.broker.auth; -import static org.apache.pulsar.broker.BrokerTestUtil.*; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithoutRecordingInvocations; +import static org.testng.Assert.assertEquals; import com.google.common.collect.Sets; import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; @@ -37,10 +39,13 @@ import java.util.function.Predicate; import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.container.TimeoutHandler; +import lombok.AllArgsConstructor; +import lombok.Data; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminBuilder; @@ -48,6 +53,9 @@ import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.ConnectionPool; +import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; @@ -55,17 +63,33 @@ import org.apache.pulsar.tests.TestRetrySupport; import org.apache.pulsar.utils.ResourceUtils; import org.apache.zookeeper.MockZooKeeper; +import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.mockito.Mockito; import org.mockito.internal.util.MockUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.testcontainers.shaded.org.awaitility.Awaitility; import org.testng.annotations.DataProvider; /** * Base class for all tests that need a Pulsar instance without a ZK and BK cluster. */ public abstract class MockedPulsarServiceBaseTest extends TestRetrySupport { + // All certificate-authority files are copied from the tests/certificate-authority directory and all share the same + // root CA. 
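+    // A typical TLS wiring with these shared files (illustrative sketch; mirrors how
+    // V1_AdminApiTest below uses them, adapt the configuration and clients to the concrete test):
+    //   conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH);
+    //   conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH);
+    //   PulsarAdmin adminTls = PulsarAdmin.builder()
+    //           .tlsTrustCertsFilePath(CA_CERT_FILE_PATH)
+    //           .serviceHttpUrl(brokerUrlTls.toString())
+    //           .build();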
+ protected static String getTlsFileForClient(String name) { + return ResourceUtils.getAbsolutePath(String.format("certificate-authority/client-keys/%s.pem", name)); + } + public final static String CA_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); + public final static String BROKER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + public final static String BROKER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + public final static String PROXY_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/proxy.cert.pem"); + public final static String PROXY_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/proxy.key-pk8.pem"); public final static String BROKER_KEYSTORE_FILE_PATH = ResourceUtils.getAbsolutePath("certificate-authority/jks/broker.keystore.jks"); public final static String BROKER_TRUSTSTORE_FILE_PATH = @@ -206,10 +230,8 @@ protected void doInitConf() throws Exception { this.conf.setBrokerShutdownTimeoutMs(0L); this.conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); this.conf.setBrokerServicePort(Optional.of(0)); - this.conf.setBrokerServicePortTls(Optional.of(0)); this.conf.setAdvertisedAddress("localhost"); this.conf.setWebServicePort(Optional.of(0)); - this.conf.setWebServicePortTls(Optional.of(0)); this.conf.setNumExecutorThreadPoolSize(5); this.conf.setExposeBundlesMetricsInPrometheus(true); } @@ -449,9 +471,7 @@ protected ServiceConfiguration getDefaultConf() { configuration.setBrokerShutdownTimeoutMs(0L); configuration.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); configuration.setBrokerServicePort(Optional.of(0)); - configuration.setBrokerServicePortTls(Optional.of(0)); configuration.setWebServicePort(Optional.of(0)); - configuration.setWebServicePortTls(Optional.of(0)); configuration.setBookkeeperClientExposeStatsToPrometheus(true); configuration.setNumExecutorThreadPoolSize(5); configuration.setBrokerMaxConnections(0); @@ -623,5 +643,43 @@ public Object[][] incorrectPersistentPolicies() { }; } + protected ServiceProducer getServiceProducer(ProducerImpl clientProducer, String topicName) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + org.apache.pulsar.broker.service.Producer serviceProducer = + persistentTopic.getProducers().get(clientProducer.getProducerName()); + long clientProducerId = WhiteboxImpl.getInternalState(clientProducer, "producerId"); + assertEquals(serviceProducer.getProducerId(), clientProducerId); + assertEquals(serviceProducer.getEpoch(), clientProducer.getConnectionHandler().getEpoch()); + return new ServiceProducer(serviceProducer, persistentTopic); + } + + @Data + @AllArgsConstructor + public static class ServiceProducer { + private org.apache.pulsar.broker.service.Producer serviceProducer; + private PersistentTopic persistentTopic; + } + + protected void sleepSeconds(int seconds){ + try { + Thread.sleep(1000 * seconds); + } catch (InterruptedException e) { + log.warn("This thread has been interrupted", e); + Thread.currentThread().interrupt(); + } + } + + private static void reconnectAllConnections(PulsarClientImpl c) throws Exception { + ConnectionPool pool = c.getCnxPool(); + Method closeAllConnections = ConnectionPool.class.getDeclaredMethod("closeAllConnections", new Class[]{}); + closeAllConnections.setAccessible(true); + 
closeAllConnections.invoke(pool, new Object[]{}); + } + + protected void reconnectAllConnections() throws Exception { + reconnectAllConnections((PulsarClientImpl) pulsarClient); + } + private static final Logger log = LoggerFactory.getLogger(MockedPulsarServiceBaseTest.class); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/BookkeeperBucketSnapshotStorageTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/BookkeeperBucketSnapshotStorageTest.java index a628b58e10d32..d26f38fa2bc2b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/BookkeeperBucketSnapshotStorageTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/BookkeeperBucketSnapshotStorageTest.java @@ -29,7 +29,11 @@ import java.util.concurrent.ExecutionException; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.delayed.bucket.BookkeeperBucketSnapshotStorage; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegmentMetadata; +import org.apache.pulsar.common.util.FutureUtil; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; @@ -59,9 +63,8 @@ protected void cleanup() throws Exception { @Test public void testCreateSnapshot() throws ExecutionException, InterruptedException { - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata.newBuilder().build(); - List bucketSnapshotSegments = new ArrayList<>(); + SnapshotMetadata snapshotMetadata = SnapshotMetadata.newBuilder().build(); + List bucketSnapshotSegments = new ArrayList<>(); CompletableFuture future = bucketSnapshotStorage.createBucketSnapshot(snapshotMetadata, bucketSnapshotSegments, UUID.randomUUID().toString(), TOPIC_NAME, CURSOR_NAME); @@ -71,24 +74,23 @@ public void testCreateSnapshot() throws ExecutionException, InterruptedException @Test public void testGetSnapshot() throws ExecutionException, InterruptedException { - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata segmentMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata.newBuilder() + SnapshotSegmentMetadata segmentMetadata = + SnapshotSegmentMetadata.newBuilder() .setMinScheduleTimestamp(System.currentTimeMillis()) .setMaxScheduleTimestamp(System.currentTimeMillis()) .putDelayedIndexBitMap(100L, ByteString.copyFrom(new byte[1])).build(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata.newBuilder() + SnapshotMetadata snapshotMetadata = + SnapshotMetadata.newBuilder() .addMetadataList(segmentMetadata) .build(); - List bucketSnapshotSegments = new ArrayList<>(); + List bucketSnapshotSegments = new ArrayList<>(); long timeMillis = System.currentTimeMillis(); - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex delayedIndex = - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex.newBuilder().setLedgerId(100L).setEntryId(10L) - .setTimestamp(timeMillis).build(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment snapshotSegment = - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment.newBuilder().addIndexes(delayedIndex).build(); + 
DelayedIndex delayedIndex = new DelayedIndex().setLedgerId(100L).setEntryId(10L) + .setTimestamp(timeMillis); + SnapshotSegment snapshotSegment = new SnapshotSegment(); + snapshotSegment.addIndexe().copyFrom(delayedIndex); bucketSnapshotSegments.add(snapshotSegment); bucketSnapshotSegments.add(snapshotSegment); @@ -98,13 +100,13 @@ public void testGetSnapshot() throws ExecutionException, InterruptedException { Long bucketId = future.get(); Assert.assertNotNull(bucketId); - CompletableFuture> bucketSnapshotSegment = + CompletableFuture> bucketSnapshotSegment = bucketSnapshotStorage.getBucketSnapshotSegment(bucketId, 1, 3); - List snapshotSegments = bucketSnapshotSegment.get(); + List snapshotSegments = bucketSnapshotSegment.get(); Assert.assertEquals(2, snapshotSegments.size()); - for (DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment segment : snapshotSegments) { - for (DelayedMessageIndexBucketSnapshotFormat.DelayedIndex index : segment.getIndexesList()) { + for (SnapshotSegment segment : snapshotSegments) { + for (DelayedIndex index : segment.getIndexesList()) { Assert.assertEquals(100L, index.getLedgerId()); Assert.assertEquals(10L, index.getEntryId()); Assert.assertEquals(timeMillis, index.getTimestamp()); @@ -120,17 +122,17 @@ public void testGetSnapshotMetadata() throws ExecutionException, InterruptedExce map.put(100L, ByteString.copyFrom("test1", StandardCharsets.UTF_8)); map.put(200L, ByteString.copyFrom("test2", StandardCharsets.UTF_8)); - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata segmentMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata.newBuilder() + SnapshotSegmentMetadata segmentMetadata = + SnapshotSegmentMetadata.newBuilder() .setMaxScheduleTimestamp(timeMillis) .setMinScheduleTimestamp(timeMillis) .putAllDelayedIndexBitMap(map).build(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata.newBuilder() + SnapshotMetadata snapshotMetadata = + SnapshotMetadata.newBuilder() .addMetadataList(segmentMetadata) .build(); - List bucketSnapshotSegments = new ArrayList<>(); + List bucketSnapshotSegments = new ArrayList<>(); CompletableFuture future = bucketSnapshotStorage.createBucketSnapshot(snapshotMetadata, @@ -138,10 +140,10 @@ public void testGetSnapshotMetadata() throws ExecutionException, InterruptedExce Long bucketId = future.get(); Assert.assertNotNull(bucketId); - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata bucketSnapshotMetadata = + SnapshotMetadata bucketSnapshotMetadata = bucketSnapshotStorage.getBucketSnapshotMetadata(bucketId).get(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata metadata = + SnapshotSegmentMetadata metadata = bucketSnapshotMetadata.getMetadataList(0); Assert.assertEquals(timeMillis, metadata.getMaxScheduleTimestamp()); @@ -151,9 +153,9 @@ public void testGetSnapshotMetadata() throws ExecutionException, InterruptedExce @Test public void testDeleteSnapshot() throws ExecutionException, InterruptedException { - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata.newBuilder().build(); - List bucketSnapshotSegments = new ArrayList<>(); + SnapshotMetadata snapshotMetadata = + SnapshotMetadata.newBuilder().build(); + List bucketSnapshotSegments = new ArrayList<>(); CompletableFuture future = bucketSnapshotStorage.createBucketSnapshot(snapshotMetadata, bucketSnapshotSegments, UUID.randomUUID().toString(), 
TOPIC_NAME, CURSOR_NAME); @@ -172,24 +174,22 @@ public void testDeleteSnapshot() throws ExecutionException, InterruptedException @Test public void testGetBucketSnapshotLength() throws ExecutionException, InterruptedException { - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata segmentMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegmentMetadata.newBuilder() + SnapshotSegmentMetadata segmentMetadata = + SnapshotSegmentMetadata.newBuilder() .setMinScheduleTimestamp(System.currentTimeMillis()) .setMaxScheduleTimestamp(System.currentTimeMillis()) .putDelayedIndexBitMap(100L, ByteString.copyFrom(new byte[1])).build(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata snapshotMetadata = - DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata.newBuilder() + SnapshotMetadata snapshotMetadata = + SnapshotMetadata.newBuilder() .addMetadataList(segmentMetadata) .build(); - List bucketSnapshotSegments = new ArrayList<>(); + List bucketSnapshotSegments = new ArrayList<>(); long timeMillis = System.currentTimeMillis(); - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex delayedIndex = - DelayedMessageIndexBucketSnapshotFormat.DelayedIndex.newBuilder().setLedgerId(100L).setEntryId(10L) - .setTimestamp(timeMillis).build(); - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment snapshotSegment = - DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment.newBuilder().addIndexes(delayedIndex).build(); + DelayedIndex delayedIndex = new DelayedIndex().setLedgerId(100L).setEntryId(10L).setTimestamp(timeMillis); + SnapshotSegment snapshotSegment = new SnapshotSegment(); + snapshotSegment.addIndexe().copyFrom(delayedIndex); bucketSnapshotSegments.add(snapshotSegment); bucketSnapshotSegments.add(snapshotSegment); @@ -204,4 +204,44 @@ public void testGetBucketSnapshotLength() throws ExecutionException, Interrupted Assert.assertTrue(bucketSnapshotLength > 0L); } + @Test + public void testConcurrencyGet() throws ExecutionException, InterruptedException { + SnapshotSegmentMetadata segmentMetadata = + SnapshotSegmentMetadata.newBuilder() + .setMinScheduleTimestamp(System.currentTimeMillis()) + .setMaxScheduleTimestamp(System.currentTimeMillis()) + .putDelayedIndexBitMap(100L, ByteString.copyFrom(new byte[1])).build(); + + SnapshotMetadata snapshotMetadata = + SnapshotMetadata.newBuilder() + .addMetadataList(segmentMetadata) + .build(); + List bucketSnapshotSegments = new ArrayList<>(); + + long timeMillis = System.currentTimeMillis(); + DelayedIndex delayedIndex = new DelayedIndex().setLedgerId(100L).setEntryId(10L).setTimestamp(timeMillis); + SnapshotSegment snapshotSegment = new SnapshotSegment(); + snapshotSegment.addIndexe().copyFrom(delayedIndex); + bucketSnapshotSegments.add(snapshotSegment); + bucketSnapshotSegments.add(snapshotSegment); + + CompletableFuture future = + bucketSnapshotStorage.createBucketSnapshot(snapshotMetadata, + bucketSnapshotSegments, UUID.randomUUID().toString(), TOPIC_NAME, CURSOR_NAME); + Long bucketId = future.get(); + Assert.assertNotNull(bucketId); + + List> futures = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + CompletableFuture future0 = CompletableFuture.runAsync(() -> { + List list = + bucketSnapshotStorage.getBucketSnapshotSegment(bucketId, 1, 3).join(); + Assert.assertTrue(list.size() > 0); + }); + futures.add(future0); + } + + FutureUtil.waitForAll(futures).join(); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockBucketSnapshotStorage.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockBucketSnapshotStorage.java index 9e924bdeda341..dc1c0e09ca276 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockBucketSnapshotStorage.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockBucketSnapshotStorage.java @@ -36,8 +36,8 @@ import java.util.concurrent.atomic.AtomicLong; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.delayed.bucket.BucketSnapshotStorage; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotMetadata; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.SnapshotMetadata; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; import org.apache.pulsar.common.util.FutureUtil; @Slf4j @@ -139,13 +139,9 @@ public CompletableFuture> getBucketSnapshotSegment(long bu long lastEntryId = Math.min(lastSegmentEntryId, this.bucketSnapshots.get(bucketId).size()); for (int i = (int) firstSegmentEntryId; i <= lastEntryId ; i++) { ByteBuf byteBuf = this.bucketSnapshots.get(bucketId).get(i); - SnapshotSegment snapshotSegment; - try { - snapshotSegment = SnapshotSegment.parseFrom(byteBuf.nioBuffer()); - snapshotSegments.add(snapshotSegment); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } + SnapshotSegment snapshotSegment = new SnapshotSegment(); + snapshotSegment.parseFrom(byteBuf, byteBuf.readableBytes()); + snapshotSegments.add(snapshotSegment); } return snapshotSegments; }, executorService); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockManagedCursor.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockManagedCursor.java index 499262c1e60b9..477290fc6837a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockManagedCursor.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/MockManagedCursor.java @@ -24,6 +24,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -276,6 +277,11 @@ public void asyncFindNewestMatching(FindPositionConstraint constraint, } + @Override + public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate condition, + AsyncCallbacks.FindEntryCallback callback, Object ctx, boolean isFindFromLedger) { + } + @Override public void resetCursor(Position position) throws InterruptedException, ManagedLedgerException { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTrackerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTrackerTest.java index 95234d688f6a0..39b3992fbd195 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTrackerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTrackerTest.java @@ -39,6 +39,7 @@ import java.util.NavigableSet; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -197,9 +198,11 @@ public void 
testRecoverSnapshot(BucketDelayedDeliveryTracker tracker) { }); assertTrue(tracker.hasMessageAvailable()); - Set scheduledMessages = tracker.getScheduledMessages(100); - - assertEquals(scheduledMessages.size(), 1); + Set scheduledMessages = new TreeSet<>(); + Awaitility.await().untilAsserted(() -> { + scheduledMessages.addAll(tracker.getScheduledMessages(100)); + assertEquals(scheduledMessages.size(), 1); + }); tracker.addMessage(101, 101, 101 * 10); @@ -216,12 +219,15 @@ public void testRecoverSnapshot(BucketDelayedDeliveryTracker tracker) { clockTime.set(100 * 10); assertTrue(tracker2.hasMessageAvailable()); - scheduledMessages = tracker2.getScheduledMessages(70); + Set scheduledMessages2 = new TreeSet<>(); - assertEquals(scheduledMessages.size(), 70); + Awaitility.await().untilAsserted(() -> { + scheduledMessages2.addAll(tracker2.getScheduledMessages(70)); + assertEquals(scheduledMessages2.size(), 70); + }); int i = 31; - for (PositionImpl scheduledMessage : scheduledMessages) { + for (PositionImpl scheduledMessage : scheduledMessages2) { assertEquals(scheduledMessage, PositionImpl.get(i, i)); i++; } @@ -298,7 +304,11 @@ public void testMergeSnapshot(final BucketDelayedDeliveryTracker tracker) { clockTime.set(110 * 10); - NavigableSet scheduledMessages = tracker2.getScheduledMessages(110); + NavigableSet scheduledMessages = new TreeSet<>(); + Awaitility.await().untilAsserted(() -> { + scheduledMessages.addAll(tracker2.getScheduledMessages(110)); + assertEquals(scheduledMessages.size(), 110); + }); for (int i = 1; i <= 110; i++) { PositionImpl position = scheduledMessages.pollFirst(); assertEquals(position, PositionImpl.get(i, i)); @@ -370,7 +380,11 @@ public void testWithBkException(final BucketDelayedDeliveryTracker tracker) { assertEquals(tracker2.getScheduledMessages(100).size(), 0); - assertEquals(tracker2.getScheduledMessages(100).size(), delayedMessagesInSnapshotValue); + Set scheduledMessages = new TreeSet<>(); + Awaitility.await().untilAsserted(() -> { + scheduledMessages.addAll(tracker2.getScheduledMessages(100)); + assertEquals(scheduledMessages.size(), delayedMessagesInSnapshotValue); + }); assertTrue(mockBucketSnapshotStorage.createExceptionQueue.isEmpty()); assertTrue(mockBucketSnapshotStorage.getMetaDataExceptionQueue.isEmpty()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueueTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueueTest.java index 8f87f0d49a20c..5dc3dcc7cb9a7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueueTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/bucket/DelayedIndexQueueTest.java @@ -23,8 +23,8 @@ import java.util.List; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.DelayedIndex; -import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat.SnapshotSegment; +import org.apache.pulsar.broker.delayed.proto.DelayedIndex; +import org.apache.pulsar.broker.delayed.proto.SnapshotSegment; import org.apache.pulsar.common.util.collections.TripleLongPriorityQueue; import org.testng.Assert; import org.testng.annotations.Test; @@ -35,27 +35,19 @@ public class DelayedIndexQueueTest { @Test public void testCompare() { DelayedIndex delayedIndex = - DelayedIndex.newBuilder().setTimestamp(1).setLedgerId(1L).setEntryId(1L) - .build(); + new 
DelayedIndex().setTimestamp(1).setLedgerId(1L).setEntryId(1L); DelayedIndex delayedIndex2 = - DelayedIndex.newBuilder().setTimestamp(2).setLedgerId(2L).setEntryId(2L) - .build(); + new DelayedIndex().setTimestamp(2).setLedgerId(2L).setEntryId(2L); Assert.assertTrue(COMPARATOR.compare(delayedIndex, delayedIndex2) < 0); delayedIndex = - DelayedIndex.newBuilder().setTimestamp(1).setLedgerId(1L).setEntryId(1L) - .build(); + new DelayedIndex().setTimestamp(1).setLedgerId(1L).setEntryId(1L); delayedIndex2 = - DelayedIndex.newBuilder().setTimestamp(1).setLedgerId(2L).setEntryId(2L) - .build(); + new DelayedIndex().setTimestamp(1).setLedgerId(2L).setEntryId(2L); Assert.assertTrue(COMPARATOR.compare(delayedIndex, delayedIndex2) < 0); - delayedIndex = - DelayedIndex.newBuilder().setTimestamp(1).setLedgerId(1L).setEntryId(1L) - .build(); - delayedIndex2 = - DelayedIndex.newBuilder().setTimestamp(1).setLedgerId(1L).setEntryId(2L) - .build(); + delayedIndex = new DelayedIndex().setTimestamp(1).setLedgerId(1L).setEntryId(1L); + delayedIndex2 = new DelayedIndex().setTimestamp(1).setLedgerId(1L).setEntryId(2L); Assert.assertTrue(COMPARATOR.compare(delayedIndex, delayedIndex2) < 0); } @@ -63,21 +55,19 @@ public void testCompare() { public void testCombinedSegmentDelayedIndexQueue() { List listA = new ArrayList<>(); for (int i = 0; i < 10; i++) { - DelayedIndex delayedIndex = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(1L).setEntryId(1L) - .build(); + DelayedIndex delayedIndex = new DelayedIndex().setTimestamp(i).setLedgerId(1L).setEntryId(1L); listA.add(delayedIndex); } - SnapshotSegment snapshotSegmentA1 = SnapshotSegment.newBuilder().addAllIndexes(listA).build(); + SnapshotSegment snapshotSegmentA1 = new SnapshotSegment(); + snapshotSegmentA1.addAllIndexes(listA); List listA2 = new ArrayList<>(); for (int i = 10; i < 20; i++) { - DelayedIndex delayedIndex = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(1L).setEntryId(1L) - .build(); + DelayedIndex delayedIndex = new DelayedIndex().setTimestamp(i).setLedgerId(1L).setEntryId(1L); listA2.add(delayedIndex); } - SnapshotSegment snapshotSegmentA2 = SnapshotSegment.newBuilder().addAllIndexes(listA2).build(); + SnapshotSegment snapshotSegmentA2 = new SnapshotSegment(); + snapshotSegmentA2.addAllIndexes(listA2); List segmentListA = new ArrayList<>(); segmentListA.add(snapshotSegmentA1); @@ -85,36 +75,32 @@ public void testCombinedSegmentDelayedIndexQueue() { List listB = new ArrayList<>(); for (int i = 0; i < 9; i++) { - DelayedIndex delayedIndex = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(2L).setEntryId(1L) - .build(); + DelayedIndex delayedIndex = new DelayedIndex().setTimestamp(i).setLedgerId(2L).setEntryId(1L); - DelayedIndex delayedIndex2 = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(2L).setEntryId(2L) - .build(); + DelayedIndex delayedIndex2 = new DelayedIndex().setTimestamp(i).setLedgerId(2L).setEntryId(2L); listB.add(delayedIndex); listB.add(delayedIndex2); } - SnapshotSegment snapshotSegmentB = SnapshotSegment.newBuilder().addAllIndexes(listB).build(); + SnapshotSegment snapshotSegmentB = new SnapshotSegment(); + snapshotSegmentB.addAllIndexes(listB); List segmentListB = new ArrayList<>(); segmentListB.add(snapshotSegmentB); - segmentListB.add(SnapshotSegment.newBuilder().build()); + segmentListB.add(new SnapshotSegment()); List listC = new ArrayList<>(); for (int i = 10; i < 30; i+=2) { DelayedIndex delayedIndex = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(2L).setEntryId(1L) - .build(); + new 
DelayedIndex().setTimestamp(i).setLedgerId(2L).setEntryId(1L); DelayedIndex delayedIndex2 = - DelayedIndex.newBuilder().setTimestamp(i).setLedgerId(2L).setEntryId(2L) - .build(); + new DelayedIndex().setTimestamp(i).setLedgerId(2L).setEntryId(2L); listC.add(delayedIndex); listC.add(delayedIndex2); } - SnapshotSegment snapshotSegmentC = SnapshotSegment.newBuilder().addAllIndexes(listC).build(); + SnapshotSegment snapshotSegmentC = new SnapshotSegment(); + snapshotSegmentC.addAllIndexes(listC); List segmentListC = new ArrayList<>(); segmentListC.add(snapshotSegmentC); @@ -123,11 +109,14 @@ public void testCombinedSegmentDelayedIndexQueue() { int count = 0; while (!delayedIndexQueue.isEmpty()) { - DelayedIndex pop = delayedIndexQueue.pop(); + DelayedIndex pop = new DelayedIndex(); + delayedIndexQueue.popToObject(pop); log.info("{} , {}, {}", pop.getTimestamp(), pop.getLedgerId(), pop.getEntryId()); count++; if (!delayedIndexQueue.isEmpty()) { DelayedIndex peek = delayedIndexQueue.peek(); + long timestamp = delayedIndexQueue.peekTimestamp(); + Assert.assertEquals(timestamp, peek.getTimestamp()); Assert.assertTrue(COMPARATOR.compare(peek, pop) >= 0); } } @@ -147,10 +136,13 @@ public void TripleLongPriorityDelayedIndexQueueTest() { int count = 0; while (!delayedIndexQueue.isEmpty()) { - DelayedIndex pop = delayedIndexQueue.pop(); + DelayedIndex pop = new DelayedIndex(); + delayedIndexQueue.popToObject(pop); count++; if (!delayedIndexQueue.isEmpty()) { DelayedIndex peek = delayedIndexQueue.peek(); + long timestamp = delayedIndexQueue.peekTimestamp(); + Assert.assertEquals(timestamp, peek.getTimestamp()); Assert.assertTrue(COMPARATOR.compare(peek, pop) >= 0); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java new file mode 100644 index 0000000000000..5adc78b2c5212 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.loadbalance; + +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class AdvertisedListenersMultiBrokerLeaderElectionTest extends MultiBrokerLeaderElectionTest { + @Override + protected PulsarTestContext.Builder createPulsarTestContextBuilder(ServiceConfiguration conf) { + conf.setWebServicePortTls(Optional.of(0)); + return super.createPulsarTestContextBuilder(conf).preallocatePorts(true).configOverride(config -> { + // use advertised address that is different than the name used in the advertised listeners + config.setAdvertisedAddress("localhost"); + config.setAdvertisedListeners( + "public_pulsar:pulsar://127.0.0.1:" + config.getBrokerServicePort().get() + + ",public_http:http://127.0.0.1:" + config.getWebServicePort().get() + + ",public_https:https://127.0.0.1:" + config.getWebServicePortTls().get()); + }); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java index 7a8154312e4dc..a88ccd60ae4c4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java @@ -78,7 +78,6 @@ private void updateConfig(ServiceConfiguration conf, String advertisedAddress) { ",public_https:https://localhost:" + httpsPort); conf.setBrokerServicePort(Optional.of(pulsarPort)); conf.setWebServicePort(Optional.of(httpPort)); - conf.setWebServicePortTls(Optional.of(httpsPort)); } @Test @@ -101,7 +100,6 @@ public void testLookup() throws Exception { assertEquals(new URI(ld.getBrokerUrl()).getHost(), "localhost"); assertEquals(new URI(ld.getHttpUrl()).getHost(), "localhost"); - assertEquals(new URI(ld.getHttpUrlTls()).getHost(), "localhost"); // Produce data diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java index 560cfa9216a02..5fbda961c0e3d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java @@ -103,14 +103,14 @@ public void setup() throws Exception { setupConfigs(conf); super.internalSetup(conf); pulsar1 = pulsar; - primaryHost = String.format("%s:%d", "localhost", pulsar1.getListenPortHTTP().get()); + primaryHost = pulsar1.getBrokerId(); admin1 = admin; var config2 = getDefaultConf(); setupConfigs(config2); additionalPulsarTestContext = createAdditionalPulsarTestContext(config2); pulsar2 = additionalPulsarTestContext.getPulsarService(); - secondaryHost = String.format("%s:%d", "localhost", pulsar2.getListenPortHTTP().get()); + secondaryHost = pulsar2.getBrokerId(); primaryLoadManager = getField(pulsar1.getLoadManager().get(), "loadManager"); secondaryLoadManager = getField(pulsar2.getLoadManager().get(), "loadManager"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java index 
62faa70bbcb76..ded4ee8e58d53 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java @@ -115,7 +115,8 @@ public void anErrorShouldBeThrowBeforeLeaderElected() throws PulsarServerExcepti checkLookupException(tenant, namespace, client); // broker, webService and leaderElectionService is started, and elect is done; - leaderBrokerReference.set(new LeaderBroker(pulsar.getWebServiceAddress())); + leaderBrokerReference.set( + new LeaderBroker(pulsar.getBrokerId(), pulsar.getSafeWebServiceAddress())); Producer producer = client.newProducer() .topic("persistent://" + tenant + "/" + namespace + "/1p") @@ -129,10 +130,10 @@ private void checkLookupException(String tenant, String namespace, PulsarClient .topic("persistent://" + tenant + "/" + namespace + "/1p") .create(); } catch (PulsarClientException t) { - Assert.assertTrue(t instanceof PulsarClientException.LookupException); + Assert.assertTrue(t instanceof PulsarClientException.BrokerMetadataException + || t instanceof PulsarClientException.LookupException); Assert.assertTrue( - t.getMessage().contains( - "java.lang.IllegalStateException: The leader election has not yet been completed!")); + t.getMessage().contains("The leader election has not yet been completed")); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java new file mode 100644 index 0000000000000..ac21b30bdde51 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.loadbalance; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mockStatic; +import static org.testng.Assert.assertEquals; +import java.nio.file.Files; +import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import org.mockito.MockedStatic; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class LinuxInfoUtilsTest { + + /** + * simulate reading first line of /proc/stat to get total cpu usage. 
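The new LinuxInfoUtilsTest mocks Files.lines(..) so the utility reads a canned first line of /proc/stat; the idle and total values asserted just below follow directly from that line. A worked sketch of the same arithmetic (idle = idle + iowait columns, total = sum of all jiffy columns), using only standard-library calls:

    String line = "cpu 317808 128 58637 2503692 7634 0 13472 0 0 0";
    long[] jiffies = java.util.Arrays.stream(line.split("\\s+"))
            .skip(1)                                     // drop the "cpu" label
            .mapToLong(Long::parseLong)
            .toArray();
    long idle = jiffies[3] + jiffies[4];                 // idle + iowait = 2503692 + 7634
    long total = java.util.Arrays.stream(jiffies).sum(); // 2901371
    long usage = total - idle;                           // the usage component the test expects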
+ */ + @Test + public void testGetCpuUsageForEntireHost(){ + try (MockedStatic filesMockedStatic = mockStatic(Files.class)) { + filesMockedStatic.when(() -> Files.lines(any())).thenReturn( + Stream.generate(() -> "cpu 317808 128 58637 2503692 7634 0 13472 0 0 0")); + long idle = 2503692 + 7634, total = 2901371; + LinuxInfoUtils.ResourceUsage resourceUsage = LinuxInfoUtils.ResourceUsage.builder() + .usage(total - idle) + .idle(idle) + .total(total).build(); + assertEquals(LinuxInfoUtils.getCpuUsageForEntireHost(), resourceUsage); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java index 68902c73e5717..04a2175b1d10f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java @@ -124,7 +124,6 @@ void setup() throws Exception { config.setAdvertisedAddress("localhost"); config.setWebServicePort(Optional.of(0)); config.setBrokerServicePortTls(Optional.of(0)); - config.setWebServicePortTls(Optional.of(0)); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -139,7 +138,7 @@ void setup() throws Exception { brokerNativeBrokerPorts[i] = pulsarServices[i].getBrokerListenPort().get(); brokerUrls[i] = new URL("http://127.0.0.1" + ":" + brokerWebServicePorts[i]); - lookupAddresses[i] = pulsarServices[i].getAdvertisedAddress() + ":" + pulsarServices[i].getListenPortHTTP().get(); + lookupAddresses[i] = pulsarServices[i].getBrokerId(); pulsarAdmins[i] = PulsarAdmin.builder().serviceHttpUrl(brokerUrls[i].toString()).build(); } @@ -410,7 +409,7 @@ public void testTopicAssignmentWithExistingBundles() throws Exception { double expectedMaxVariation = 10.0; for (int i = 0; i < BROKER_COUNT; i++) { long actualValue = 0; - String resourceId = "http://" + lookupAddresses[i]; + String resourceId = lookupAddresses[i]; if (namespaceOwner.containsKey(resourceId)) { actualValue = namespaceOwner.get(resourceId); } @@ -696,7 +695,7 @@ public void testLeaderElection() throws Exception { } } // Make sure all brokers see the same leader - log.info("Old leader is : {}", oldLeader.getServiceUrl()); + log.info("Old leader is : {}", oldLeader.getBrokerId()); for (PulsarService pulsar : activePulsar) { log.info("Current leader for {} is : {}", pulsar.getWebServiceAddress(), pulsar.getLeaderElectionService().getCurrentLeader()); assertEquals(pulsar.getLeaderElectionService().readCurrentLeader().join(), Optional.of(oldLeader)); @@ -706,7 +705,7 @@ public void testLeaderElection() throws Exception { leaderPulsar.close(); loopUntilLeaderChangesForAllBroker(followerPulsar, oldLeader); LeaderBroker newLeader = followerPulsar.get(0).getLeaderElectionService().readCurrentLeader().join().get(); - log.info("New leader is : {}", newLeader.getServiceUrl()); + log.info("New leader is : {}", newLeader.getBrokerId()); Assert.assertNotEquals(newLeader, oldLeader); } } @@ -744,7 +743,7 @@ private void createNamespacePolicies(PulsarService pulsar) throws Exception { // set up policy that use this broker as secondary policyData = NamespaceIsolationData.builder() .namespaces(Collections.singletonList("pulsar/use/secondary-ns.*")) - .primary(Collections.singletonList(pulsarServices[0].getWebServiceAddress())) + 
.primary(Collections.singletonList(pulsarServices[0].getAdvertisedAddress())) .secondary(allExceptFirstBroker) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) @@ -756,7 +755,7 @@ private void createNamespacePolicies(PulsarService pulsar) throws Exception { // set up policy that do not use this broker (neither primary nor secondary) policyData = NamespaceIsolationData.builder() .namespaces(Collections.singletonList("pulsar/use/shared-ns.*")) - .primary(Collections.singletonList(pulsarServices[0].getWebServiceAddress())) + .primary(Collections.singletonList(pulsarServices[0].getAdvertisedAddress())) .secondary(allExceptFirstBroker) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java index bec971dfa40e7..ffa2607b6b42e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java @@ -60,12 +60,12 @@ public void checkLoadReportNicSpeed() throws Exception { LoadManagerReport report = admin.brokerStats().getLoadReport(); if (SystemUtils.IS_OS_LINUX) { - assertEquals(report.getBandwidthIn().limit, usableNicCount * 5.4 * 1000 * 1000); - assertEquals(report.getBandwidthOut().limit, usableNicCount * 5.4 * 1000 * 1000); + assertEquals(report.getBandwidthIn().limit, usableNicCount * 5.4 * 1000 * 1000, 0.0001); + assertEquals(report.getBandwidthOut().limit, usableNicCount * 5.4 * 1000 * 1000, 0.0001); } else { // On non-Linux system we don't report the network usage - assertEquals(report.getBandwidthIn().limit, -1.0); - assertEquals(report.getBandwidthOut().limit, -1.0); + assertEquals(report.getBandwidthIn().limit, -1.0, 0.0001); + assertEquals(report.getBandwidthOut().limit, -1.0, 0.0001); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java index 5c840129dd8d5..103983506a6dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java @@ -20,6 +20,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -36,14 +37,24 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.MultiBrokerTestZKBaseTest; import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.client.admin.Lookup; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.naming.TopicName; import org.awaitility.Awaitility; import org.testng.annotations.Test; @Slf4j @Test(groups = "broker") public class MultiBrokerLeaderElectionTest extends MultiBrokerTestZKBaseTest { + public MultiBrokerLeaderElectionTest() { + super(); + this.isTcpLookup = 
true; + } + @Override protected int numberOfAdditionalBrokers() { return 9; @@ -88,39 +99,80 @@ public void shouldAllBrokersBeAbleToGetTheLeader() { }); } - @Test - public void shouldProvideConsistentAnswerToTopicLookups() + @Test(timeOut = 60000L) + public void shouldProvideConsistentAnswerToTopicLookupsUsingAdminApi() throws PulsarAdminException, ExecutionException, InterruptedException { - String topicNameBase = "persistent://public/default/lookuptest" + UUID.randomUUID() + "-"; + String namespace = "public/ns" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace, 256); + String topicNameBase = "persistent://" + namespace + "/lookuptest-"; List topicNames = IntStream.range(0, 500).mapToObj(i -> topicNameBase + i) .collect(Collectors.toList()); List allAdmins = getAllAdmins(); @Cleanup("shutdown") ExecutorService executorService = Executors.newFixedThreadPool(allAdmins.size()); List>> resultFutures = new ArrayList<>(); - String leaderBrokerUrl = admin.brokers().getLeaderBroker().getServiceUrl(); - log.info("LEADER is {}", leaderBrokerUrl); // use Phaser to increase the chances of a race condition by triggering all threads once - // they are waiting just before the lookupTopic call + // they are waiting just before each lookupTopic call final Phaser phaser = new Phaser(1); for (PulsarAdmin brokerAdmin : allAdmins) { - if (!leaderBrokerUrl.equals(brokerAdmin.getServiceUrl())) { - phaser.register(); - log.info("Doing lookup to broker {}", brokerAdmin.getServiceUrl()); - resultFutures.add(executorService.submit(() -> { - phaser.arriveAndAwaitAdvance(); - return topicNames.stream().map(topicName -> { - try { - return brokerAdmin.lookups().lookupTopic(topicName); - } catch (PulsarAdminException e) { - log.error("Error looking up topic {} in {}", topicName, brokerAdmin.getServiceUrl()); - throw new RuntimeException(e); - } - }).collect(Collectors.toList()); - })); + phaser.register(); + Lookup lookups = brokerAdmin.lookups(); + log.info("Doing lookup to broker {}", brokerAdmin.getServiceUrl()); + resultFutures.add(executorService.submit(() -> topicNames.stream().map(topicName -> { + phaser.arriveAndAwaitAdvance(); + try { + return lookups.lookupTopic(topicName); + } catch (PulsarAdminException e) { + log.error("Error looking up topic {} in {}", topicName, brokerAdmin.getServiceUrl()); + throw new RuntimeException(e); + } + }).collect(Collectors.toList()))); + } + phaser.arriveAndDeregister(); + List firstResult = null; + for (Future> resultFuture : resultFutures) { + List result = resultFuture.get(); + if (firstResult == null) { + firstResult = result; + } else { + assertEquals(result, firstResult, "The lookup results weren't consistent."); } } - phaser.arriveAndAwaitAdvance(); + } + + @Test(timeOut = 60000L) + public void shouldProvideConsistentAnswerToTopicLookupsUsingClient() + throws PulsarAdminException, ExecutionException, InterruptedException { + String namespace = "public/ns" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace, 256); + String topicNameBase = "persistent://" + namespace + "/lookuptest-"; + List topicNames = IntStream.range(0, 500).mapToObj(i -> topicNameBase + i) + .collect(Collectors.toList()); + List allClients = getAllClients(); + @Cleanup("shutdown") + ExecutorService executorService = Executors.newFixedThreadPool(allClients.size()); + List>> resultFutures = new ArrayList<>(); + // use Phaser to increase the chances of a race condition by triggering all threads once + // they are waiting just before each lookupTopic call + final Phaser 
phaser = new Phaser(1); + for (PulsarClient brokerClient : allClients) { + phaser.register(); + String serviceUrl = ((PulsarClientImpl) brokerClient).getConfiguration().getServiceUrl(); + LookupService lookupService = ((PulsarClientImpl) brokerClient).getLookup(); + log.info("Doing lookup to broker {}", serviceUrl); + resultFutures.add(executorService.submit(() -> topicNames.stream().map(topicName -> { + phaser.arriveAndAwaitAdvance(); + try { + InetSocketAddress logicalAddress = + lookupService.getBroker(TopicName.get(topicName)).get().getLeft(); + return logicalAddress.getHostString() + ":" + logicalAddress.getPort(); + } catch (InterruptedException | ExecutionException e) { + log.error("Error looking up topic {} in {}", topicName, serviceUrl); + throw new RuntimeException(e); + } + }).collect(Collectors.toList()))); + } + phaser.arriveAndDeregister(); List firstResult = null; for (Future> resultFuture : resultFutures) { List result = resultFuture.get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleBrokerStartTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleBrokerStartTest.java index 6de2eb90f12ef..28dde8b7f559d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleBrokerStartTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleBrokerStartTest.java @@ -20,6 +20,8 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.Optional; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; @@ -28,6 +30,7 @@ import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.testng.Assert; import org.testng.annotations.Test; @Slf4j @@ -96,4 +99,27 @@ public void testNoNICSpeed() throws Exception { } + @Test + public void testCGroupMetrics() { + if (!LinuxInfoUtils.isLinux()) { + return; + } + + boolean existsCGroup = Files.exists(Paths.get("/sys/fs/cgroup")); + boolean cGroupEnabled = LinuxInfoUtils.isCGroupEnabled(); + Assert.assertEquals(cGroupEnabled, existsCGroup); + + double totalCpuLimit = LinuxInfoUtils.getTotalCpuLimit(cGroupEnabled); + log.info("totalCpuLimit: {}", totalCpuLimit); + Assert.assertTrue(totalCpuLimit > 0.0); + + if (cGroupEnabled) { + Assert.assertNotNull(LinuxInfoUtils.getMetrics()); + + long cpuUsageForCGroup = LinuxInfoUtils.getCpuUsageForCGroup(); + log.info("cpuUsageForCGroup: {}", cpuUsageForCGroup); + Assert.assertTrue(cpuUsageForCGroup > 0); + } + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java index c4898786e3e03..f92faa3e4bd54 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java @@ -55,12 +55,16 @@ import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceUnit; import org.apache.pulsar.client.admin.BrokerStats; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import 
org.apache.pulsar.common.policies.data.AutoFailoverPolicyData; import org.apache.pulsar.common.policies.data.AutoFailoverPolicyType; +import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.NamespaceIsolationData; import org.apache.pulsar.common.policies.data.ResourceQuota; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.policies.impl.NamespaceIsolationPolicies; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.metadata.api.MetadataStoreException.BadVersionException; @@ -71,6 +75,7 @@ import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -92,8 +97,14 @@ public class SimpleLoadManagerImplTest { BrokerStats brokerStatsClient2; String primaryHost; + + String primaryTlsHost; + String secondaryHost; + private String defaultNamespace; + private String defaultTenant; + ExecutorService executor = new ThreadPoolExecutor(5, 20, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); @BeforeMethod @@ -107,13 +118,13 @@ void setup() throws Exception { ServiceConfiguration config1 = new ServiceConfiguration(); config1.setClusterName("use"); config1.setWebServicePort(Optional.of(0)); + config1.setWebServicePortTls(Optional.of(0)); config1.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config1.setBrokerShutdownTimeoutMs(0L); config1.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config1.setBrokerServicePort(Optional.of(0)); config1.setLoadManagerClassName(SimpleLoadManagerImpl.class.getName()); config1.setBrokerServicePortTls(Optional.of(0)); - config1.setWebServicePortTls(Optional.of(0)); config1.setAdvertisedAddress("localhost"); pulsar1 = new PulsarService(config1); pulsar1.start(); @@ -122,11 +133,13 @@ void setup() throws Exception { admin1 = PulsarAdmin.builder().serviceHttpUrl(url1.toString()).build(); brokerStatsClient1 = admin1.brokerStats(); primaryHost = pulsar1.getWebServiceAddress(); + primaryTlsHost = pulsar1.getWebServiceAddressTls(); // Start broker 2 ServiceConfiguration config2 = new ServiceConfiguration(); config2.setClusterName("use"); config2.setWebServicePort(Optional.of(0)); + config2.setWebServicePortTls(Optional.of(0)); config2.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config2.setBrokerShutdownTimeoutMs(0L); config2.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -143,6 +156,8 @@ void setup() throws Exception { brokerStatsClient2 = admin2.brokerStats(); secondaryHost = pulsar2.getWebServiceAddress(); Thread.sleep(100); + + setupClusters(); } @AfterMethod(alwaysRun = true) @@ -196,7 +211,7 @@ public void testBasicBrokerSelection() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://prod2-broker7.messaging.usw.example.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("prod2-broker7.messaging.usw.example.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); @@ -231,15 +246,15 @@ public void testPrimary() throws 
Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit( - "http://" + pulsar1.getAdvertisedAddress() + ":" + pulsar1.getConfiguration().getWebServicePort().get(), rd); + ResourceUnit ru1 = new SimpleResourceUnit(pulsar1.getBrokerId(), rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); // inject the load report and rankings Map loadReports = new HashMap<>(); - org.apache.pulsar.policies.data.loadbalancer.LoadReport loadReport = new org.apache.pulsar.policies.data.loadbalancer.LoadReport(); + org.apache.pulsar.policies.data.loadbalancer.LoadReport loadReport = + new org.apache.pulsar.policies.data.loadbalancer.LoadReport(); loadReport.setSystemResourceUsage(new SystemResourceUsage()); loadReports.put(ru1, loadReport); setObjectField(SimpleLoadManagerImpl.class, loadManager, "currentLoadReports", loadReports); @@ -254,11 +269,9 @@ public void testPrimary() throws Exception { sortedRankingsInstance.get().put(lr.getRank(rd), rus); setObjectField(SimpleLoadManagerImpl.class, loadManager, "sortedRankings", sortedRankingsInstance); - ResourceUnit found = loadManager - .getLeastLoaded(NamespaceName.get("pulsar/use/primary-ns.10")).get(); - // broker is not active so found should be null + ResourceUnit found = loadManager.getLeastLoaded(NamespaceName.get("pulsar/use/primary-ns.10")).get(); + // TODO: this test doesn't make sense. This was the original assertion. assertNotEquals(found, null, "did not find a broker when expected one to be found"); - } @Test(enabled = false) @@ -272,7 +285,7 @@ public void testPrimarySecondary() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://prod2-broker7.messaging.usw.example.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("prod2-broker7.messaging.usw.example.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); @@ -341,8 +354,8 @@ public void testDoLoadShedding() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://pulsar-broker1.com:8080", rd); - ResourceUnit ru2 = new SimpleResourceUnit("http://pulsar-broker2.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("pulsar-broker1.com:8080", rd); + ResourceUnit ru2 = new SimpleResourceUnit("pulsar-broker2.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); rus.add(ru2); @@ -395,14 +408,14 @@ public void testEvenBundleDistribution() throws Exception { final SimpleLoadManagerImpl loadManager = (SimpleLoadManagerImpl) pulsar1.getLoadManager().get(); for (final NamespaceBundle bundle : bundles) { - if (loadManager.getLeastLoaded(bundle).get().getResourceId().equals(primaryHost)) { + if (loadManager.getLeastLoaded(bundle).get().getResourceId().equals(pulsar1.getBrokerId())) { ++numAssignedToPrimary; } else { ++numAssignedToSecondary; } // Check that number of assigned bundles are equivalent when an even number have been assigned. 
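In the SimpleLoadManagerImplTest hunks above, resource units and lookup addresses stop carrying an http:// scheme and are keyed by the bare broker id instead. A hypothetical helper showing the id shape these tests now expect (the host and port are illustrative, not taken from the patch):

    // Hypothetical: the broker id is the advertised address plus the HTTP listen port, no scheme.
    static String brokerId(String advertisedAddress, int webServicePort) {
        return advertisedAddress + ":" + webServicePort;   // e.g. "pulsar-broker1.com:8080"
    }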
if ((numAssignedToPrimary + numAssignedToSecondary) % 2 == 0) { - assert (numAssignedToPrimary == numAssignedToSecondary); + assertEquals(numAssignedToPrimary, numAssignedToSecondary); } } } @@ -475,4 +488,34 @@ public void testUsage() { assertEquals(usage.getBandwidthIn().usage, usageLimit); } + @Test + public void testGetWebSerUrl() throws PulsarAdminException { + String webServiceUrl = admin1.brokerStats().getLoadReport().getWebServiceUrl(); + Assert.assertEquals(webServiceUrl, pulsar1.getWebServiceAddress()); + + String webServiceUrl2 = admin2.brokerStats().getLoadReport().getWebServiceUrl(); + Assert.assertEquals(webServiceUrl2, pulsar2.getWebServiceAddress()); + } + + @Test + public void testRedirectOwner() throws PulsarAdminException { + final String topicName = "persistent://" + defaultNamespace + "/" + "test-topic"; + admin1.topics().createNonPartitionedTopic(topicName); + TopicStats stats = admin1.topics().getStats(topicName); + Assert.assertNotNull(stats); + + TopicStats stats2 = admin2.topics().getStats(topicName); + Assert.assertNotNull(stats2); + } + + private void setupClusters() throws PulsarAdminException { + admin1.clusters().createCluster("use", ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()) + .brokerServiceUrl(pulsar1.getBrokerServiceUrl()).build()); + TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("use")); + defaultTenant = "prop-xyz"; + admin1.tenants().createTenant(defaultTenant, tenantInfo); + defaultNamespace = defaultTenant + "/ns1"; + admin1.namespaces().createNamespace(defaultNamespace, Set.of("use")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/AntiAffinityNamespaceGroupExtensionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/AntiAffinityNamespaceGroupExtensionTest.java index 3469dbe5a5499..2293ecd2c2403 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/AntiAffinityNamespaceGroupExtensionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/AntiAffinityNamespaceGroupExtensionTest.java @@ -130,8 +130,8 @@ public void testAntiAffinityGroupPolicyFilter() doReturn(namespace + "/" + bundle).when(namespaceBundle).toString(); var expected = new HashMap<>(brokers); - var actual = antiAffinityGroupPolicyFilter.filter( - brokers, namespaceBundle, context); + var actual = antiAffinityGroupPolicyFilter.filterAsync( + brokers, namespaceBundle, context).get(); assertEquals(actual, expected); doReturn(antiAffinityEnabledNameSpace + "/" + bundle).when(namespaceBundle).toString(); @@ -141,8 +141,8 @@ public void testAntiAffinityGroupPolicyFilter() var srcBroker = serviceUnitStateChannel.getOwnerAsync(namespaceBundle.toString()) .get(5, TimeUnit.SECONDS).get(); expected.remove(srcBroker); - actual = antiAffinityGroupPolicyFilter.filter( - brokers, namespaceBundle, context); + actual = antiAffinityGroupPolicyFilter.filterAsync( + brokers, namespaceBundle, context).get(); assertEquals(actual, expected); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java index 26986a494f0be..e199e695cf777 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java @@ -76,7 +76,7 @@ public class BrokerRegistryTest { private LocalBookkeeperEnsemble bkEnsemble; - // Make sure the load manager don't register itself to `/loadbalance/brokers/{lookupServiceAddress}` + // Make sure the load manager don't register itself to `/loadbalance/brokers/{brokerId}`. public static class MockLoadManager implements LoadManager { @Override @@ -292,7 +292,7 @@ public void testRegisterFailWithSameBrokerId() throws Exception { pulsar1.start(); pulsar2.start(); - doReturn(pulsar1.getLookupServiceAddress()).when(pulsar2).getLookupServiceAddress(); + doReturn(pulsar1.getBrokerId()).when(pulsar2).getBrokerId(); BrokerRegistryImpl brokerRegistry1 = createBrokerRegistryImpl(pulsar1); BrokerRegistryImpl brokerRegistry2 = createBrokerRegistryImpl(pulsar2); brokerRegistry1.start(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java index 9ab2467d0ef1b..43b84a5426f3c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.loadbalance.extensions; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState.Releasing; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelTest.overrideTableView; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Admin; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Bandwidth; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.MsgRate; @@ -35,9 +37,14 @@ import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Overloaded; import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Underloaded; import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Unknown; +import static org.apache.pulsar.broker.namespace.NamespaceService.getHeartbeatNamespace; +import static org.apache.pulsar.broker.namespace.NamespaceService.getHeartbeatNamespaceV2; +import static org.apache.pulsar.broker.namespace.NamespaceService.getSLAMonitorNamespace; +import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -48,10 +55,11 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - import com.google.common.collect.Sets; import java.util.LinkedHashMap; import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -63,8 +71,8 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.concurrent.CompletableFuture; import 
org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -73,6 +81,7 @@ import org.apache.pulsar.broker.loadbalance.LeaderElectionService; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLoadData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.extensions.data.TopBundlesLoadData; @@ -87,20 +96,32 @@ import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; import org.apache.pulsar.broker.lookup.LookupResult; import org.apache.pulsar.broker.namespace.LookupOptions; +import org.apache.pulsar.broker.namespace.NamespaceBundleOwnershipListener; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.impl.TableViewImpl; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; +import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.naming.TopicVersion; +import org.apache.pulsar.common.policies.data.BrokerAssignment; import org.apache.pulsar.common.policies.data.BundlesData; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.NamespaceOwnershipStatus; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.common.stats.Metrics; +import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; -import org.testcontainers.shaded.org.awaitility.Awaitility; +import org.awaitility.Awaitility; +import org.mockito.MockedStatic; +import org.testng.AssertJUnit; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; @@ -125,23 +146,29 @@ public class ExtensibleLoadManagerImplTest extends MockedPulsarServiceBaseTest { private ServiceUnitStateChannelImpl channel1; private ServiceUnitStateChannelImpl channel2; - @BeforeClass - @Override - public void setup() throws Exception { + private final String defaultTestNamespace = "public/test"; + + private static void initConfig(ServiceConfiguration conf){ conf.setForceDeleteNamespaceAllowed(true); + conf.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); conf.setAllowAutoTopicCreation(true); conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); conf.setLoadBalancerSheddingEnabled(false); conf.setLoadBalancerDebugModeEnabled(true); + conf.setTopicLevelPoliciesEnabled(true); + } + + @BeforeClass + 
@Override + public void setup() throws Exception { + // Set the inflight state waiting time and ownership monitor delay time to 5 seconds to avoid + // stuck when doing unload. + initConfig(conf); super.internalSetup(conf); pulsar1 = pulsar; ServiceConfiguration defaultConf = getDefaultConf(); - defaultConf.setAllowAutoTopicCreation(true); - defaultConf.setForceDeleteNamespaceAllowed(true); - defaultConf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); - defaultConf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); - defaultConf.setLoadBalancerSheddingEnabled(false); + initConfig(defaultConf); additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf); pulsar2 = additionalPulsarTestContext.getPulsarService(); @@ -157,20 +184,22 @@ public void setup() throws Exception { admin.namespaces().createNamespace("public/default"); admin.namespaces().setNamespaceReplicationClusters("public/default", Sets.newHashSet(this.conf.getClusterName())); + + admin.namespaces().createNamespace(defaultTestNamespace); + admin.namespaces().setNamespaceReplicationClusters(defaultTestNamespace, + Sets.newHashSet(this.conf.getClusterName())); } @Override - @AfterClass + @AfterClass(alwaysRun = true) protected void cleanup() throws Exception { - pulsar1 = null; - pulsar2.close(); - super.internalCleanup(); this.additionalPulsarTestContext.close(); + super.internalCleanup(); } @BeforeMethod(alwaysRun = true) protected void initializeState() throws PulsarAdminException { - admin.namespaces().unload("public/default"); + admin.namespaces().unload(defaultTestNamespace); reset(primaryLoadManager, secondaryLoadManager); } @@ -194,7 +223,7 @@ public void testAssignInternalTopic() throws Exception { @Test public void testAssign() throws Exception { - TopicName topicName = TopicName.get("test-assign"); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-assign"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); Optional brokerLookupData = primaryLoadManager.assign(Optional.empty(), bundle).get(); assertTrue(brokerLookupData.isPresent()); @@ -203,9 +232,6 @@ public void testAssign() throws Exception { Optional brokerLookupData1 = secondaryLoadManager.assign(Optional.empty(), bundle).get(); assertEquals(brokerLookupData, brokerLookupData1); - verify(primaryLoadManager, times(1)).getBrokerSelectionStrategy(); - verify(secondaryLoadManager, times(0)).getBrokerSelectionStrategy(); - Optional lookupResult = pulsar2.getNamespaceService() .getBrokerServiceUrlAsync(topicName, null).get(); assertTrue(lookupResult.isPresent()); @@ -219,7 +245,8 @@ public void testAssign() throws Exception { @Test public void testCheckOwnershipAsync() throws Exception { - NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get("test-check-ownership")).get(); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-check-ownership"); + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); // 1. The bundle is never assigned. 
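The BrokerFilter hunks that follow (testFilter, testFilterHasException, testMoreThenOneFilter) switch from the synchronous filter(..) to filterAsync(..), which hands back the broker map in a CompletableFuture and reports errors through a failed future rather than a thrown BrokerFilterException. A minimal sketch of a filter in the new shape, assuming Map<String, BrokerLookupData> as the broker map type and a made-up broker id:

    BrokerFilter dropOneBroker = new BrokerFilter() {
        @Override
        public String name() {
            return "drop-one-broker";
        }

        @Override
        public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(
                Map<String, BrokerLookupData> brokers, ServiceUnitId serviceUnit, LoadManagerContext context) {
            brokers.remove("some-broker:8080");   // hypothetical broker id to exclude
            return CompletableFuture.completedFuture(brokers);
        }
    };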
assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); @@ -239,7 +266,7 @@ public void testCheckOwnershipAsync() throws Exception { @Test public void testFilter() throws Exception { - TopicName topicName = TopicName.get("test-filter"); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-filter"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); doReturn(List.of(new BrokerFilter() { @@ -249,11 +276,11 @@ public String name() { } @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { - brokers.remove(pulsar1.getLookupServiceAddress()); - return brokers; + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + brokers.remove(pulsar1.getBrokerId()); + return CompletableFuture.completedFuture(brokers); } })).when(primaryLoadManager).getBrokerFilterPipeline(); @@ -265,16 +292,16 @@ public Map filter(Map broker @Test public void testFilterHasException() throws Exception { - TopicName topicName = TopicName.get("test-filter-has-exception"); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-filter-has-exception"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); doReturn(List.of(new MockBrokerFilter() { @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { - brokers.clear(); - throw new BrokerFilterException("Test"); + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + brokers.remove(brokers.keySet().iterator().next()); + return FutureUtil.failedFuture(new BrokerFilterException("Test")); } })).when(primaryLoadManager).getBrokerFilterPipeline(); @@ -284,24 +311,62 @@ public Map filter(Map broker @Test(timeOut = 30 * 1000) public void testUnloadAdminAPI() throws Exception { - TopicName topicName = TopicName.get("test-unload"); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-unload"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + AtomicInteger onloadCount = new AtomicInteger(0); + AtomicInteger unloadCount = new AtomicInteger(0); + + NamespaceBundleOwnershipListener listener = new NamespaceBundleOwnershipListener() { + @Override + public void onLoad(NamespaceBundle bundle) { + onloadCount.incrementAndGet(); + } + + @Override + public void unLoad(NamespaceBundle bundle) { + unloadCount.incrementAndGet(); + } + + @Override + public boolean test(NamespaceBundle namespaceBundle) { + return namespaceBundle.equals(bundle); + } + }; + pulsar1.getNamespaceService().addNamespaceBundleOwnershipListener(listener); + pulsar2.getNamespaceService().addNamespaceBundleOwnershipListener(listener); String broker = admin.lookups().lookupTopic(topicName.toString()); log.info("Assign the bundle {} to {}", bundle, broker); checkOwnershipState(broker, bundle); + Awaitility.await().untilAsserted(() -> { + assertEquals(onloadCount.get(), 1); + assertEquals(unloadCount.get(), 0); + }); + admin.namespaces().unloadNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange()); assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + Awaitility.await().untilAsserted(() -> { + assertEquals(onloadCount.get(), 1); + 
assertEquals(unloadCount.get(), 1); + }); broker = admin.lookups().lookupTopic(topicName.toString()); log.info("Assign the bundle {} to {}", bundle, broker); - String dstBrokerUrl = pulsar1.getLookupServiceAddress(); + String finalBroker = broker; + Awaitility.await().untilAsserted(() -> { + checkOwnershipState(finalBroker, bundle); + assertEquals(onloadCount.get(), 2); + assertEquals(unloadCount.get(), 1); + }); + + + String dstBrokerUrl = pulsar1.getBrokerId(); String dstBrokerServiceUrl; if (broker.equals(pulsar1.getBrokerServiceUrl())) { - dstBrokerUrl = pulsar2.getLookupServiceAddress(); + dstBrokerUrl = pulsar2.getBrokerId(); dstBrokerServiceUrl = pulsar2.getBrokerServiceUrl(); } else { dstBrokerServiceUrl = pulsar1.getBrokerServiceUrl(); @@ -309,6 +374,10 @@ public void testUnloadAdminAPI() throws Exception { checkOwnershipState(broker, bundle); admin.namespaces().unloadNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange(), dstBrokerUrl); + Awaitility.await().untilAsserted(() -> { + assertEquals(onloadCount.get(), 3); + assertEquals(unloadCount.get(), 2); + }); assertEquals(admin.lookups().lookupTopic(topicName.toString()), dstBrokerServiceUrl); @@ -336,7 +405,7 @@ private void checkOwnershipState(String broker, NamespaceBundle bundle) @Test(timeOut = 30 * 1000) public void testSplitBundleAdminAPI() throws Exception { - String namespace = "public/default"; + String namespace = defaultTestNamespace; String topic = "persistent://" + namespace + "/test-split"; admin.topics().createPartitionedTopic(topic, 10); BundlesData bundles = admin.namespaces().getBundles(namespace); @@ -369,7 +438,7 @@ public void testSplitBundleAdminAPI() throws Exception { @Test(timeOut = 30 * 1000) public void testSplitBundleWithSpecificPositionAdminAPI() throws Exception { - String namespace = "public/default"; + String namespace = defaultTestNamespace; String topic = "persistent://" + namespace + "/test-split-with-specific-position"; admin.topics().createPartitionedTopic(topic, 10); BundlesData bundles = admin.namespaces().getBundles(namespace); @@ -386,7 +455,8 @@ public void testSplitBundleWithSpecificPositionAdminAPI() throws Exception { "specified_positions_divide", List.of(bundleRanges.get(0), bundleRanges.get(1), splitPosition)); BundlesData bundlesData = admin.namespaces().getBundles(namespace); - assertEquals(bundlesData.getNumBundles(), numBundles + 1); + Awaitility.waitAtMost(15, TimeUnit.SECONDS) + .untilAsserted(() -> assertEquals(bundlesData.getNumBundles(), numBundles + 1)); String lowBundle = String.format("0x%08x", bundleRanges.get(0)); String midBundle = String.format("0x%08x", splitPosition); String highBundle = String.format("0x%08x", bundleRanges.get(1)); @@ -396,16 +466,29 @@ public void testSplitBundleWithSpecificPositionAdminAPI() throws Exception { } @Test(timeOut = 30 * 1000) public void testDeleteNamespaceBundle() throws Exception { - TopicName topicName = TopicName.get("test-delete-namespace-bundle"); - NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); - - String broker = admin.lookups().lookupTopic(topicName.toString()); - log.info("Assign the bundle {} to {}", bundle, broker); - - checkOwnershipState(broker, bundle); - - admin.namespaces().deleteNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange()); - assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + final String namespace = "public/testDeleteNamespaceBundle"; + admin.namespaces().createNamespace(namespace, 3); + TopicName topicName = 
TopicName.get(namespace + "/test-delete-namespace-bundle"); + + + Awaitility.await() + .atMost(15, TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> { + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + String broker = admin.lookups().lookupTopic(topicName.toString()); + log.info("Assign the bundle {} to {}", bundle, broker); + checkOwnershipState(broker, bundle); + admin.namespaces().deleteNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange(), true); + // this could fail if the system topic lookup asynchronously happens before this. + // we will retry if it fails. + assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + }); + + Awaitility.await() + .atMost(15, TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> admin.namespaces().deleteNamespace(namespace, true)); } @Test(timeOut = 30 * 1000) @@ -445,25 +528,24 @@ public void testCheckOwnershipPresentWithSystemNamespace() throws Exception { @Test public void testMoreThenOneFilter() throws Exception { - TopicName topicName = TopicName.get("test-filter-has-exception"); + TopicName topicName = TopicName.get(defaultTestNamespace + "/test-filter-has-exception"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); - String lookupServiceAddress1 = pulsar1.getLookupServiceAddress(); + String brokerId1 = pulsar1.getBrokerId(); doReturn(List.of(new MockBrokerFilter() { @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { - brokers.remove(lookupServiceAddress1); - return brokers; + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + brokers.remove(brokerId1); + return CompletableFuture.completedFuture(brokers); } },new MockBrokerFilter() { @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { - brokers.clear(); - throw new BrokerFilterException("Test"); + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + return FutureUtil.failedFuture(new BrokerFilterException("Test")); } })).when(primaryLoadManager).getBrokerFilterPipeline(); @@ -474,99 +556,164 @@ public Map filter(Map broker @Test public void testDeployAndRollbackLoadManager() throws Exception { - // Test rollback to modular load manager. 
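testDeleteNamespaceBundle above wraps the whole lookup, ownership check and bundle deletion in an Awaitility retry so that a racing system-topic lookup only triggers another attempt instead of failing the test. The idiom, reduced to its shape (Awaitility and TimeUnit exactly as used in the hunk):

    Awaitility.await()
            .atMost(15, TimeUnit.SECONDS)
            .ignoreExceptions()                    // transient failures simply start another attempt
            .untilAsserted(() -> {
                // re-run the lookup, the ownership assertion and the bundle deletion here
            });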
- ServiceConfiguration defaultConf = getDefaultConf(); - defaultConf.setAllowAutoTopicCreation(true); - defaultConf.setForceDeleteNamespaceAllowed(true); - defaultConf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); - defaultConf.setLoadBalancerSheddingEnabled(false); - try (var additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf)) { - // start pulsar3 with old load manager - var pulsar3 = additionalPulsarTestContext.getPulsarService(); - String topic = "persistent://public/default/test"; - - String lookupResult1 = pulsar3.getAdminClient().lookups().lookupTopic(topic); - assertEquals(lookupResult1, pulsar3.getBrokerServiceUrl()); - - String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic); - String lookupResult3 = pulsar2.getAdminClient().lookups().lookupTopic(topic); - assertEquals(lookupResult1, lookupResult2); - assertEquals(lookupResult1, lookupResult3); - - NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get("test")).get(); - LookupOptions options = LookupOptions.builder() - .authoritative(false) - .requestHttps(false) - .readOnly(false) - .loadTopicsInBundle(false).build(); - Optional webServiceUrl1 = - pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl1.isPresent()); - assertEquals(webServiceUrl1.get().toString(), pulsar3.getWebServiceAddress()); - - Optional webServiceUrl2 = - pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl2.isPresent()); - assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); - - Optional webServiceUrl3 = - pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl3.isPresent()); - assertEquals(webServiceUrl3.get().toString(), webServiceUrl1.get().toString()); - - // Test deploy new broker with new load manager - ServiceConfiguration conf = getDefaultConf(); - conf.setAllowAutoTopicCreation(true); - conf.setForceDeleteNamespaceAllowed(true); - conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); - conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); - try (var additionPulsarTestContext = createAdditionalPulsarTestContext(conf)) { - var pulsar4 = additionPulsarTestContext.getPulsarService(); - - Set availableCandidates = Sets.newHashSet(pulsar1.getBrokerServiceUrl(), - pulsar2.getBrokerServiceUrl(), - pulsar4.getBrokerServiceUrl()); - String lookupResult4 = pulsar4.getAdminClient().lookups().lookupTopic(topic); - assertTrue(availableCandidates.contains(lookupResult4)); - - String lookupResult5 = pulsar1.getAdminClient().lookups().lookupTopic(topic); - String lookupResult6 = pulsar2.getAdminClient().lookups().lookupTopic(topic); - String lookupResult7 = pulsar3.getAdminClient().lookups().lookupTopic(topic); - assertEquals(lookupResult4, lookupResult5); - assertEquals(lookupResult4, lookupResult6); - assertEquals(lookupResult4, lookupResult7); - - Set availableWebUrlCandidates = Sets.newHashSet(pulsar1.getWebServiceAddress(), - pulsar2.getWebServiceAddress(), - pulsar4.getWebServiceAddress()); - - webServiceUrl1 = + try (MockedStatic channelMockedStatic = + mockStatic(ServiceUnitStateChannelImpl.class)) { + channelMockedStatic.when(() -> ServiceUnitStateChannelImpl.newInstance(isA(PulsarService.class))) + .thenAnswer(invocation -> { + PulsarService pulsarService = invocation.getArgument(0); + // Set the inflight state waiting time and ownership monitor delay time to 5 seconds to avoid + // stuck when doing 
unload. + return new ServiceUnitStateChannelImpl(pulsarService, 5 * 1000, 1); + }); + // Test rollback to modular load manager. + ServiceConfiguration defaultConf = getDefaultConf(); + defaultConf.setAllowAutoTopicCreation(true); + defaultConf.setForceDeleteNamespaceAllowed(true); + defaultConf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); + defaultConf.setLoadBalancerSheddingEnabled(false); + try (var additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf)) { + // start pulsar3 with old load manager + var pulsar3 = additionalPulsarTestContext.getPulsarService(); + String topic = "persistent://" + defaultTestNamespace + "/test"; + + String lookupResult1 = pulsar3.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult1, pulsar3.getBrokerServiceUrl()); + + String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic); + String lookupResult3 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult1, lookupResult2); + assertEquals(lookupResult1, lookupResult3); + + NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get(topic)).get(); + LookupOptions options = LookupOptions.builder() + .authoritative(false) + .requestHttps(false) + .readOnly(false) + .loadTopicsInBundle(false).build(); + Optional webServiceUrl1 = pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); assertTrue(webServiceUrl1.isPresent()); - assertTrue(availableWebUrlCandidates.contains(webServiceUrl1.get().toString())); + assertEquals(webServiceUrl1.get().toString(), pulsar3.getWebServiceAddress()); - webServiceUrl2 = + Optional webServiceUrl2 = pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); assertTrue(webServiceUrl2.isPresent()); assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); - // The pulsar3 will redirect to pulsar4 - webServiceUrl3 = + Optional webServiceUrl3 = pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); assertTrue(webServiceUrl3.isPresent()); - // It will redirect to pulsar4 - assertTrue(availableWebUrlCandidates.contains(webServiceUrl3.get().toString())); - - var webServiceUrl4 = - pulsar4.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl4.isPresent()); - assertEquals(webServiceUrl4.get().toString(), webServiceUrl1.get().toString()); + assertEquals(webServiceUrl3.get().toString(), webServiceUrl1.get().toString()); + + List pulsarServices = List.of(pulsar1, pulsar2, pulsar3); + for (PulsarService pulsarService : pulsarServices) { + // Test lookup heartbeat namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupHeartbeatOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + // Test lookup SLA namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupSLANamespaceOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + } + // Test deploy new broker with new load manager + ServiceConfiguration conf = getDefaultConf(); + conf.setAllowAutoTopicCreation(true); + conf.setForceDeleteNamespaceAllowed(true); + conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); + conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); + try (var additionPulsarTestContext = createAdditionalPulsarTestContext(conf)) { + var pulsar4 = additionPulsarTestContext.getPulsarService(); + + Set availableCandidates = Sets.newHashSet(pulsar1.getBrokerServiceUrl(), + pulsar2.getBrokerServiceUrl(), + 
pulsar4.getBrokerServiceUrl()); + String lookupResult4 = pulsar4.getAdminClient().lookups().lookupTopic(topic); + assertTrue(availableCandidates.contains(lookupResult4)); + + String lookupResult5 = pulsar1.getAdminClient().lookups().lookupTopic(topic); + String lookupResult6 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + String lookupResult7 = pulsar3.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult4, lookupResult5); + assertEquals(lookupResult4, lookupResult6); + assertEquals(lookupResult4, lookupResult7); + + Set availableWebUrlCandidates = Sets.newHashSet(pulsar1.getWebServiceAddress(), + pulsar2.getWebServiceAddress(), + pulsar4.getWebServiceAddress()); + + webServiceUrl1 = + pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl1.isPresent()); + assertTrue(availableWebUrlCandidates.contains(webServiceUrl1.get().toString())); + + webServiceUrl2 = + pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl2.isPresent()); + assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); + + // The pulsar3 will redirect to pulsar4 + webServiceUrl3 = + pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl3.isPresent()); + // It will redirect to pulsar4 + assertTrue(availableWebUrlCandidates.contains(webServiceUrl3.get().toString())); + + var webServiceUrl4 = + pulsar4.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl4.isPresent()); + assertEquals(webServiceUrl4.get().toString(), webServiceUrl1.get().toString()); + + pulsarServices = List.of(pulsar1, pulsar2, pulsar3, pulsar4); + for (PulsarService pulsarService : pulsarServices) { + // Test lookup heartbeat namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupHeartbeatOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + // Test lookup SLA namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupSLANamespaceOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + } + } } } + } - @Test + private void assertLookupHeartbeatOwner(PulsarService pulsar, + String brokerId, + String expectedBrokerServiceUrl) throws Exception { + NamespaceName heartbeatNamespaceV1 = + getHeartbeatNamespace(brokerId, pulsar.getConfiguration()); + + String heartbeatV1Topic = heartbeatNamespaceV1.getPersistentTopicName("test"); + assertEquals(pulsar.getAdminClient().lookups().lookupTopic(heartbeatV1Topic), expectedBrokerServiceUrl); + + NamespaceName heartbeatNamespaceV2 = + getHeartbeatNamespaceV2(brokerId, pulsar.getConfiguration()); + + String heartbeatV2Topic = heartbeatNamespaceV2.getPersistentTopicName("test"); + assertEquals(pulsar.getAdminClient().lookups().lookupTopic(heartbeatV2Topic), expectedBrokerServiceUrl); + } + + private void assertLookupSLANamespaceOwner(PulsarService pulsar, + String brokerId, + String expectedBrokerServiceUrl) throws Exception { + NamespaceName slaMonitorNamespace = getSLAMonitorNamespace(brokerId, pulsar.getConfiguration()); + String slaMonitorTopic = slaMonitorNamespace.getPersistentTopicName("test"); + String result = pulsar.getAdminClient().lookups().lookupTopic(slaMonitorTopic); + log.info("Topic {} Lookup result: {}", slaMonitorTopic, result); + assertNotNull(result); + assertEquals(result, expectedBrokerServiceUrl); + } + + @Test(priority = 10) public void testTopBundlesLoadDataStoreTableViewFromChannelOwner() throws Exception { 
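// Reviewer sketch (not part of the patch): this test and testRoleChange below reach into the load
// manager's private "topBundlesLoadDataStore" field with commons-lang3 FieldUtils, wrap it in a
// Mockito spy, and restore the original afterwards. A minimal restatement of that pattern, using
// only identifiers that already appear in this file (shown here only for review):
var originalStore = (LoadDataStore<?>) FieldUtils.readDeclaredField(
        primaryLoadManager, "topBundlesLoadDataStore", true);
var spiedStore = spy(originalStore);
FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", spiedStore, true);
try {
    // ... drive the load manager, then verify(spiedStore, times(n)).startTableView() etc. ...
} finally {
    // Always restore the real store so later tests see unmodified broker state.
    FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", originalStore, true);
}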
var topBundlesLoadDataStorePrimary = (LoadDataStore) FieldUtils.readDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", true); @@ -592,6 +739,8 @@ public void testTopBundlesLoadDataStoreTableViewFromChannelOwner() throws Except restartBroker(); pulsar1 = pulsar; setPrimaryLoadManager(); + admin.namespaces().setNamespaceReplicationClusters(defaultTestNamespace, + Sets.newHashSet(this.conf.getClusterName())); var serviceUnitStateChannelPrimaryNew = (ServiceUnitStateChannelImpl) FieldUtils.readDeclaredField(primaryLoadManager, @@ -610,10 +759,7 @@ public void testTopBundlesLoadDataStoreTableViewFromChannelOwner() throws Except } @Test - public void testRoleChange() - throws Exception { - - + public void testRoleChange() throws Exception { var topBundlesLoadDataStorePrimary = (LoadDataStore) FieldUtils.readDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", true); var topBundlesLoadDataStorePrimarySpy = spy(topBundlesLoadDataStorePrimary); @@ -635,7 +781,6 @@ public void testRoleChange() reset(); return null; }).when(topBundlesLoadDataStorePrimarySpy).closeTableView(); - FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStorePrimarySpy, true); var topBundlesLoadDataStoreSecondary = (LoadDataStore) FieldUtils.readDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", true); @@ -658,36 +803,65 @@ public void testRoleChange() reset(); return null; }).when(topBundlesLoadDataStoreSecondarySpy).closeTableView(); - FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStoreSecondarySpy, true); - if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { - primaryLoadManager.playFollower(); + try { + FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStorePrimarySpy, true); + FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStoreSecondarySpy, true); + + + if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { + primaryLoadManager.playLeader(); + secondaryLoadManager.playFollower(); + verify(topBundlesLoadDataStorePrimarySpy, times(3)).startTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(5)).closeTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(0)).startTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(3)).closeTableView(); + } else { + primaryLoadManager.playFollower(); + secondaryLoadManager.playLeader(); + verify(topBundlesLoadDataStoreSecondarySpy, times(3)).startTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(5)).closeTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(0)).startTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(3)).closeTableView(); + } + primaryLoadManager.playFollower(); - secondaryLoadManager.playLeader(); - secondaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); - secondaryLoadManager.playFollower(); - secondaryLoadManager.playFollower(); - } else { - primaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); secondaryLoadManager.playFollower(); - secondaryLoadManager.playFollower(); - primaryLoadManager.playFollower(); - primaryLoadManager.playFollower(); - secondaryLoadManager.playLeader(); - secondaryLoadManager.playLeader(); - } + if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { + assertEquals(ExtensibleLoadManagerImpl.Role.Leader, + FieldUtils.readDeclaredField(primaryLoadManager, "role", true)); + 
+                assertEquals(ExtensibleLoadManagerImpl.Role.Follower,
+                        FieldUtils.readDeclaredField(secondaryLoadManager, "role", true));
+            } else {
+                assertEquals(ExtensibleLoadManagerImpl.Role.Follower,
+                        FieldUtils.readDeclaredField(primaryLoadManager, "role", true));
+                assertEquals(ExtensibleLoadManagerImpl.Role.Leader,
+                        FieldUtils.readDeclaredField(secondaryLoadManager, "role", true));
+            }
-        verify(topBundlesLoadDataStorePrimarySpy, times(3)).startTableView();
-        verify(topBundlesLoadDataStorePrimarySpy, times(3)).closeTableView();
-        verify(topBundlesLoadDataStoreSecondarySpy, times(3)).startTableView();
-        verify(topBundlesLoadDataStoreSecondarySpy, times(3)).closeTableView();
+            primaryLoadManager.playLeader();
+            secondaryLoadManager.playLeader();
-        FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStorePrimary, true);
-        FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStoreSecondary, true);
+            if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) {
+                assertEquals(ExtensibleLoadManagerImpl.Role.Leader,
+                        FieldUtils.readDeclaredField(primaryLoadManager, "role", true));
+                assertEquals(ExtensibleLoadManagerImpl.Role.Follower,
+                        FieldUtils.readDeclaredField(secondaryLoadManager, "role", true));
+            } else {
+                assertEquals(ExtensibleLoadManagerImpl.Role.Follower,
+                        FieldUtils.readDeclaredField(primaryLoadManager, "role", true));
+                assertEquals(ExtensibleLoadManagerImpl.Role.Leader,
+                        FieldUtils.readDeclaredField(secondaryLoadManager, "role", true));
+            }
+        } finally {
+            FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore",
+                    topBundlesLoadDataStorePrimary, true);
+            FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore",
+                    topBundlesLoadDataStoreSecondary, true);
+        }
     }
 
     @Test
@@ -878,6 +1052,247 @@ SplitDecision.Reason.Unknown, new AtomicLong(6))
         assertEquals(actual, expected);
     }
 
+    @Test
+    public void testDisableBroker() throws Exception {
+        // Verify that a disabled broker (running the extensible load manager) gives up its bundle ownership.
+ ServiceConfiguration defaultConf = getDefaultConf(); + defaultConf.setAllowAutoTopicCreation(true); + defaultConf.setForceDeleteNamespaceAllowed(true); + defaultConf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); + defaultConf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); + defaultConf.setLoadBalancerSheddingEnabled(false); + defaultConf.setLoadBalancerDebugModeEnabled(true); + defaultConf.setTopicLevelPoliciesEnabled(false); + try (var additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf)) { + var pulsar3 = additionalPulsarTestContext.getPulsarService(); + ExtensibleLoadManagerImpl ternaryLoadManager = spy((ExtensibleLoadManagerImpl) + FieldUtils.readField(pulsar3.getLoadManager().get(), "loadManager", true)); + String topic = "persistent://" + defaultTestNamespace +"/test"; + + String lookupResult1 = pulsar3.getAdminClient().lookups().lookupTopic(topic); + TopicName topicName = TopicName.get(topic); + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + if (!pulsar3.getBrokerServiceUrl().equals(lookupResult1)) { + admin.namespaces().unloadNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange(), + pulsar3.getBrokerId()); + lookupResult1 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + } + String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic); + String lookupResult3 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + + assertEquals(lookupResult1, pulsar3.getBrokerServiceUrl()); + assertEquals(lookupResult1, lookupResult2); + assertEquals(lookupResult1, lookupResult3); + + + assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + assertTrue(ternaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + + ternaryLoadManager.disableBroker(); + + assertFalse(ternaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + if (primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()) { + assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + } else { + assertTrue(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + } + } + } + + @Test(timeOut = 30 * 1000) + public void testListTopic() throws Exception { + final String namespace = "public/testListTopic"; + admin.namespaces().createNamespace(namespace, 9); + + final String persistentTopicName = TopicName.get( + "persistent", NamespaceName.get(namespace), + "get_topics_mode_" + UUID.randomUUID()).toString(); + + final String nonPersistentTopicName = TopicName.get( + "non-persistent", NamespaceName.get(namespace), + "get_topics_mode_" + UUID.randomUUID()).toString(); + admin.topics().createPartitionedTopic(persistentTopicName, 9); + admin.topics().createPartitionedTopic(nonPersistentTopicName, 9); + pulsarClient.newProducer().topic(persistentTopicName).create().close(); + pulsarClient.newProducer().topic(nonPersistentTopicName).create().close(); + + BundlesData bundlesData = admin.namespaces().getBundles(namespace); + List boundaries = bundlesData.getBoundaries(); + int topicNum = 0; + for (int i = 0; i < boundaries.size() - 1; i++) { + String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); + List topic = admin.topics().getListInBundle(namespace, bundle); + if (topic == null) { + continue; + } + topicNum += topic.size(); + for (String s : topic) { + 
+                assertFalse(TopicName.get(s).isPersistent());
+            }
+        }
+        assertEquals(topicNum, 9);
+
+        List<String> list = admin.topics().getList(namespace);
+        assertEquals(list.size(), 18);
+        admin.namespaces().deleteNamespace(namespace, true);
+    }
+
+    @Test(timeOut = 30 * 1000, priority = -1)
+    public void testGetOwnedServiceUnitsAndGetOwnedNamespaceStatus() throws Exception {
+        NamespaceName heartbeatNamespacePulsar1V1 =
+                getHeartbeatNamespace(pulsar1.getBrokerId(), pulsar1.getConfiguration());
+        NamespaceName heartbeatNamespacePulsar1V2 =
+                NamespaceService.getHeartbeatNamespaceV2(pulsar1.getBrokerId(), pulsar1.getConfiguration());
+
+        NamespaceName heartbeatNamespacePulsar2V1 =
+                getHeartbeatNamespace(pulsar2.getBrokerId(), pulsar2.getConfiguration());
+        NamespaceName heartbeatNamespacePulsar2V2 =
+                NamespaceService.getHeartbeatNamespaceV2(pulsar2.getBrokerId(), pulsar2.getConfiguration());
+
+        NamespaceName slaMonitorNamespacePulsar1 =
+                getSLAMonitorNamespace(pulsar1.getBrokerId(), pulsar1.getConfiguration());
+
+        NamespaceName slaMonitorNamespacePulsar2 =
+                getSLAMonitorNamespace(pulsar2.getBrokerId(), pulsar2.getConfiguration());
+
+        NamespaceBundle bundle1 = pulsar1.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(heartbeatNamespacePulsar1V1);
+        NamespaceBundle bundle2 = pulsar1.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(heartbeatNamespacePulsar1V2);
+
+        NamespaceBundle bundle3 = pulsar2.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(heartbeatNamespacePulsar2V1);
+        NamespaceBundle bundle4 = pulsar2.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(heartbeatNamespacePulsar2V2);
+
+        NamespaceBundle slaBundle1 = pulsar1.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(slaMonitorNamespacePulsar1);
+        NamespaceBundle slaBundle2 = pulsar2.getNamespaceService().getNamespaceBundleFactory()
+                .getFullBundle(slaMonitorNamespacePulsar2);
+
+        Set<NamespaceBundle> ownedServiceUnitsByPulsar1 = primaryLoadManager.getOwnedServiceUnits();
+        log.info("Owned service units: {}", ownedServiceUnitsByPulsar1);
+        // pulsar1's heartbeat and SLA-monitor namespace bundles must be owned by pulsar1.
+        assertTrue(ownedServiceUnitsByPulsar1.contains(bundle1));
+        assertTrue(ownedServiceUnitsByPulsar1.contains(bundle2));
+        assertTrue(ownedServiceUnitsByPulsar1.contains(slaBundle1));
+        Set<NamespaceBundle> ownedServiceUnitsByPulsar2 = secondaryLoadManager.getOwnedServiceUnits();
+        log.info("Owned service units: {}", ownedServiceUnitsByPulsar2);
+        assertTrue(ownedServiceUnitsByPulsar2.contains(bundle3));
+        assertTrue(ownedServiceUnitsByPulsar2.contains(bundle4));
+        assertTrue(ownedServiceUnitsByPulsar2.contains(slaBundle2));
+        Map<String, NamespaceOwnershipStatus> ownedNamespacesByPulsar1 =
+                admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar1.getBrokerId());
+        Map<String, NamespaceOwnershipStatus> ownedNamespacesByPulsar2 =
+                admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar2.getBrokerId());
+        assertTrue(ownedNamespacesByPulsar1.containsKey(bundle1.toString()));
+        assertTrue(ownedNamespacesByPulsar1.containsKey(bundle2.toString()));
+        assertTrue(ownedNamespacesByPulsar1.containsKey(slaBundle1.toString()));
+
+        assertTrue(ownedNamespacesByPulsar2.containsKey(bundle3.toString()));
+        assertTrue(ownedNamespacesByPulsar2.containsKey(bundle4.toString()));
+        assertTrue(ownedNamespacesByPulsar2.containsKey(slaBundle2.toString()));
+
+        String topic = "persistent://" + defaultTestNamespace + "/test-get-owned-service-units";
+        admin.topics().createPartitionedTopic(topic, 1);
+        NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get(topic)).join();
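// Reviewer sketch (not part of the patch): the heartbeat/SLA ownership checks above could be
// collapsed into a class-level helper like the hypothetical one below; it relies only on helpers
// already used in this test (getHeartbeatNamespace, NamespaceService.getHeartbeatNamespaceV2,
// getSLAMonitorNamespace) and on ExtensibleLoadManagerImpl.getOwnedServiceUnits().
private void assertOwnsSystemBundles(PulsarService broker, ExtensibleLoadManagerImpl loadManager) {
    var conf = broker.getConfiguration();
    var bundleFactory = broker.getNamespaceService().getNamespaceBundleFactory();
    for (NamespaceName ns : List.of(
            getHeartbeatNamespace(broker.getBrokerId(), conf),
            NamespaceService.getHeartbeatNamespaceV2(broker.getBrokerId(), conf),
            getSLAMonitorNamespace(broker.getBrokerId(), conf))) {
        // Every broker must own the full bundle of its own heartbeat and SLA-monitor namespaces.
        assertTrue(loadManager.getOwnedServiceUnits().contains(bundleFactory.getFullBundle(ns)));
    }
}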
+ CompletableFuture> owner = primaryLoadManager.assign(Optional.empty(), bundle); + assertFalse(owner.join().isEmpty()); + + BrokerLookupData brokerLookupData = owner.join().get(); + if (brokerLookupData.getWebServiceUrl().equals(pulsar1.getWebServiceAddress())) { + assertOwnedServiceUnits(pulsar1, primaryLoadManager, bundle); + } else { + assertOwnedServiceUnits(pulsar2, secondaryLoadManager, bundle); + } + } + + private void assertOwnedServiceUnits( + PulsarService pulsar, + ExtensibleLoadManagerImpl extensibleLoadManager, + NamespaceBundle bundle) throws PulsarAdminException { + Awaitility.await().untilAsserted(() -> { + Set ownedBundles = extensibleLoadManager.getOwnedServiceUnits(); + assertTrue(ownedBundles.contains(bundle)); + }); + Map ownedNamespaces = + admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar.getBrokerId()); + assertTrue(ownedNamespaces.containsKey(bundle.toString())); + NamespaceOwnershipStatus status = ownedNamespaces.get(bundle.toString()); + assertTrue(status.is_active); + assertFalse(status.is_controlled); + assertEquals(status.broker_assignment, BrokerAssignment.shared); + } + + @Test(timeOut = 30 * 1000) + public void testGetOwnedServiceUnitsWhenLoadManagerNotStart() { + ExtensibleLoadManagerImpl loadManager = new ExtensibleLoadManagerImpl(); + Set ownedServiceUnits = loadManager.getOwnedServiceUnits(); + assertNotNull(ownedServiceUnits); + assertTrue(ownedServiceUnits.isEmpty()); + } + + @Test(timeOut = 30 * 1000) + public void testTryAcquiringOwnership() + throws PulsarAdminException, ExecutionException, InterruptedException { + final String namespace = "public/testTryAcquiringOwnership"; + admin.namespaces().createNamespace(namespace, 1); + String topic = "persistent://" + namespace + "/test"; + NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get(topic)).get(); + NamespaceEphemeralData namespaceEphemeralData = primaryLoadManager.tryAcquiringOwnership(bundle).get(); + assertTrue(Set.of(pulsar1.getBrokerServiceUrl(), pulsar2.getBrokerServiceUrl()) + .contains(namespaceEphemeralData.getNativeUrl())); + admin.namespaces().deleteNamespace(namespace, true); + } + + @Test(timeOut = 30 * 1000) + public void testHealthcheck() throws PulsarAdminException { + admin.brokers().healthcheck(TopicVersion.V2); + } + + @Test(timeOut = 30 * 1000) + public void compactionScheduleTest() { + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(30, TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> { // wait until true + primaryLoadManager.monitor(); + secondaryLoadManager.monitor(); + var threshold = admin.topicPolicies() + .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, false); + AssertJUnit.assertEquals(5 * 1024 * 1024, threshold == null ? 
0 : threshold.longValue()); + }); + } + + @Test(timeOut = 10 * 1000) + public void unloadTimeoutCheckTest() + throws Exception { + Pair topicAndBundle = getBundleIsNotOwnByChangeEventTopic("unload-timeout"); + String topic = topicAndBundle.getLeft().toString(); + var bundle = topicAndBundle.getRight().toString(); + var releasing = new ServiceUnitStateData(Releasing, pulsar2.getBrokerId(), pulsar1.getBrokerId(), 1); + overrideTableView(channel1, bundle, releasing); + var topicFuture = pulsar1.getBrokerService().getOrCreateTopic(topic); + + + try { + topicFuture.get(1, TimeUnit.SECONDS); + } catch (Exception e) { + log.info("getOrCreateTopic failed", e); + if (!(e.getCause() instanceof BrokerServiceException.ServiceUnitNotReadyException && e.getMessage() + .contains("Please redo the lookup"))) { + fail(); + } + } + + pulsar1.getBrokerService() + .unloadServiceUnit(topicAndBundle.getRight(), true, 5, + TimeUnit.SECONDS).get(2, TimeUnit.SECONDS); + } + private static abstract class MockBrokerFilter implements BrokerFilter { @Override @@ -910,4 +1325,20 @@ private void setSecondaryLoadManager() throws IllegalAccessException { private CompletableFuture getBundleAsync(PulsarService pulsar, TopicName topic) { return pulsar.getNamespaceService().getBundleAsync(topic); } + + private Pair getBundleIsNotOwnByChangeEventTopic(String topicNamePrefix) + throws Exception { + TopicName changeEventsTopicName = + TopicName.get(defaultTestNamespace + "/" + SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); + NamespaceBundle changeEventsBundle = getBundleAsync(pulsar1, changeEventsTopicName).get(); + int i = 0; + while (true) { + TopicName topicName = TopicName.get(defaultTestNamespace + "/" + topicNamePrefix + "-" + i); + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + if (!bundle.equals(changeEventsBundle)) { + return Pair.of(topicName, bundle); + } + i++; + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java index 77c80187a63e1..dc9844366771c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java @@ -37,9 +37,10 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertThrows; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.expectThrows; +import static org.testng.AssertJUnit.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; @@ -53,12 +54,14 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNull; import java.lang.reflect.Field; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; +import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -83,10 +86,9 @@ import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.TypedMessageBuilder; import org.apache.pulsar.client.impl.TableViewImpl; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; @@ -104,8 +106,8 @@ public class ServiceUnitStateChannelTest extends MockedPulsarServiceBaseTest { private PulsarService pulsar2; private ServiceUnitStateChannel channel1; private ServiceUnitStateChannel channel2; - private String lookupServiceAddress1; - private String lookupServiceAddress2; + private String brokerId1; + private String brokerId2; private String bundle; private String bundle1; private String bundle2; @@ -129,6 +131,7 @@ public class ServiceUnitStateChannelTest extends MockedPulsarServiceBaseTest { @Override protected void setup() throws Exception { conf.setAllowAutoTopicCreation(true); + conf.setAllowAutoTopicCreationType(TopicType.PARTITIONED); conf.setLoadBalancerDebugModeEnabled(true); conf.setBrokerServiceCompactionMonitorIntervalInSeconds(10); super.internalSetup(conf); @@ -152,10 +155,10 @@ protected void setup() throws Exception { channel2 = createChannel(pulsar2); channel2.start(); - lookupServiceAddress1 = (String) - FieldUtils.readDeclaredField(channel1, "lookupServiceAddress", true); - lookupServiceAddress2 = (String) - FieldUtils.readDeclaredField(channel2, "lookupServiceAddress", true); + brokerId1 = (String) + FieldUtils.readDeclaredField(channel1, "brokerId", true); + brokerId2 = (String) + FieldUtils.readDeclaredField(channel2, "brokerId", true); bundle = "public/default/0x00000000_0xffffffff"; bundle1 = "public/default/0x00000000_0xfffffff0"; @@ -197,7 +200,7 @@ protected void cleanup() throws Exception { super.internalCleanup(); } - @Test(priority = 0) + @Test(priority = -1) public void channelOwnerTest() throws Exception { var channelOwner1 = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); var channelOwner2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); @@ -215,7 +218,7 @@ public void channelOwnerTest() throws Exception { assertEquals(newChannelOwner1, newChannelOwner2); assertNotEquals(channelOwner1, newChannelOwner1); - if (newChannelOwner1.equals(Optional.of(lookupServiceAddress1))) { + if (newChannelOwner1.equals(Optional.of(brokerId1))) { assertTrue(channel1.isChannelOwnerAsync().get(2, TimeUnit.SECONDS)); assertFalse(channel2.isChannelOwnerAsync().get(2, TimeUnit.SECONDS)); } else { @@ -299,7 +302,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } } try { - channel.publishAssignEventAsync(bundle, lookupServiceAddress1).get(2, TimeUnit.SECONDS); + channel.publishAssignEventAsync(bundle, brokerId1).get(2, TimeUnit.SECONDS); } catch (ExecutionException e) { if (e.getCause() instanceof IllegalStateException) { errorCnt++; @@ -307,7 +310,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } try { channel.publishUnloadEventAsync( - new Unload(lookupServiceAddress1, bundle, 
Optional.of(lookupServiceAddress2))) + new Unload(brokerId1, bundle, Optional.of(brokerId2))) .get(2, TimeUnit.SECONDS); } catch (ExecutionException e) { if (e.getCause() instanceof IllegalStateException) { @@ -315,7 +318,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } } try { - Split split = new Split(bundle, lookupServiceAddress1, Map.of( + Split split = new Split(bundle, brokerId1, Map.of( childBundle1Range, Optional.empty(), childBundle2Range, Optional.empty())); channel.publishSplitEventAsync(split) .get(2, TimeUnit.SECONDS); @@ -327,23 +330,6 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) return errorCnt; } - @Test(priority = 1) - public void compactionScheduleTest() { - - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> { // wait until true - try { - var threshold = admin.topicPolicies() - .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, false).longValue(); - assertEquals(5 * 1024 * 1024, threshold); - } catch (Exception e) { - ; - } - }); - } - @Test(priority = 2) public void assignmentTest() throws ExecutionException, InterruptedException, IllegalAccessException, TimeoutException { @@ -356,8 +342,8 @@ public void assignmentTest() assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var assigned1 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - var assigned2 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var assigned1 = channel1.publishAssignEventAsync(bundle, brokerId1); + var assigned2 = channel2.publishAssignEventAsync(bundle, brokerId2); assertNotNull(assigned1); assertNotNull(assigned2); waitUntilOwnerChanges(channel1, bundle, null); @@ -366,8 +352,8 @@ public void assignmentTest() String assignedAddr2 = assigned2.get(5, TimeUnit.SECONDS); assertEquals(assignedAddr1, assignedAddr2); - assertTrue(assignedAddr1.equals(lookupServiceAddress1) - || assignedAddr1.equals(lookupServiceAddress2), assignedAddr1); + assertTrue(assignedAddr1.equals(brokerId1) + || assignedAddr1.equals(brokerId2), assignedAddr1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); @@ -408,13 +394,13 @@ public void assignmentTestWhenOneAssignmentFails() assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var owner3 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - var owner4 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var owner3 = channel1.publishAssignEventAsync(bundle, brokerId1); + var owner4 = channel2.publishAssignEventAsync(bundle, brokerId2); assertTrue(owner3.isCompletedExceptionally()); assertNotNull(owner4); String ownerAddrOpt2 = owner4.get(5, TimeUnit.SECONDS); - assertEquals(ownerAddrOpt2, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); + assertEquals(ownerAddrOpt2, brokerId2); + waitUntilNewOwner(channel1, bundle, brokerId2); assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); @@ -432,25 +418,25 @@ public void transferTest() assertTrue(owner2.get().isEmpty()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var 
ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.of(lookupServiceAddress2)); + Unload unload = new Unload(brokerId1, bundle, Optional.of(brokerId2)); channel1.publishUnloadEventAsync(unload); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); ownerAddr1 = channel1.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS); ownerAddr2 = channel2.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); + assertEquals(ownerAddr1, Optional.of(brokerId2)); validateHandlerCounters(channel1, 2, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0); validateHandlerCounters(channel2, 2, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -467,14 +453,14 @@ public void transferTestWhenDestBrokerFails() assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); var producer = (Producer) FieldUtils.readDeclaredField(channel1, "producer", true); @@ -490,7 +476,7 @@ public void transferTestWhenDestBrokerFails() "inFlightStateWaitingTimeInMillis", 3 * 1000, true); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, true); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.of(lookupServiceAddress2)); + Unload unload = new Unload(brokerId1, bundle, Optional.of(brokerId2)); channel1.publishUnloadEventAsync(unload); // channel1 is broken. the ownership transfer won't be complete. waitUntilState(channel1, bundle); @@ -498,24 +484,22 @@ public void transferTestWhenDestBrokerFails() var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); - assertFalse(owner1.isDone()); + assertTrue(owner1.isDone()); + assertEquals(brokerId2, owner1.get().get()); assertFalse(owner2.isDone()); - assertEquals(1, getOwnerRequests1.size()); + assertEquals(0, getOwnerRequests1.size()); assertEquals(1, getOwnerRequests2.size()); - // In 5 secs, the getOwnerAsync requests(lookup requests) should time out. - Awaitility.await().atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertTrue(owner1.isCompletedExceptionally())); - Awaitility.await().atMost(5, TimeUnit.SECONDS) + // In 10 secs, the getOwnerAsync requests(lookup requests) should time out. 
+ Awaitility.await().atMost(10, TimeUnit.SECONDS) .untilAsserted(() -> assertTrue(owner2.isCompletedExceptionally())); - assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); // recovered, check the monitor update state : Assigned -> Owned - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress1))) - .when(loadManager).selectAsync(any()); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId1))) + .when(loadManager).selectAsync(any(), any()); FieldUtils.writeDeclaredField(channel2, "producer", producer, true); FieldUtils.writeDeclaredField(channel1, "inFlightStateWaitingTimeInMillis", 1 , true); @@ -523,18 +507,18 @@ public void transferTestWhenDestBrokerFails() "inFlightStateWaitingTimeInMillis", 1 , true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); ownerAddr1 = channel1.getOwnerAsync(bundle).get(); ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); var leader = channel1.isChannelOwnerAsync().get() ? channel1 : channel2; validateMonitorCounters(leader, @@ -555,13 +539,13 @@ public void transferTestWhenDestBrokerFails() @Test(priority = 6) public void splitAndRetryTest() throws Exception { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - assertEquals(ownerAddr2, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + assertEquals(ownerAddr2, Optional.of(brokerId1)); assertTrue(ownerAddr1.isPresent()); NamespaceService namespaceService = spy(pulsar1.getNamespaceService()); @@ -602,14 +586,14 @@ public void splitAndRetryTest() throws Exception { - waitUntilNewOwner(channel1, childBundle11, lookupServiceAddress1); - waitUntilNewOwner(channel1, childBundle12, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle11, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle12, lookupServiceAddress1); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle11).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle12).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle11).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle12).get()); + waitUntilNewOwner(channel1, childBundle11, brokerId1); + waitUntilNewOwner(channel1, childBundle12, brokerId1); + waitUntilNewOwner(channel2, childBundle11, brokerId1); + waitUntilNewOwner(channel2, 
childBundle12, brokerId1); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle11).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle12).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle11).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle12).get()); // try the monitor and check the monitor moves `Deleted` -> `Init` @@ -624,9 +608,9 @@ public void splitAndRetryTest() throws Exception { "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle, Init); waitUntilState(channel2, bundle, Init); @@ -713,20 +697,22 @@ public void handleBrokerDeletionEventTest() var cleanupJobs1 = getCleanupJobs(channel1); var cleanupJobs2 = getCleanupJobs(channel2); - var leaderCleanupJobs = spy(cleanupJobs1); - var followerCleanupJobs = spy(cleanupJobs2); + var leaderCleanupJobsTmp = spy(cleanupJobs1); + var followerCleanupJobsTmp = spy(cleanupJobs2); var leaderChannel = channel1; var followerChannel = channel2; String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; - var tmp = followerCleanupJobs; - followerCleanupJobs = leaderCleanupJobs; - leaderCleanupJobs = tmp; + var tmp = followerCleanupJobsTmp; + followerCleanupJobsTmp = leaderCleanupJobsTmp; + leaderCleanupJobsTmp = tmp; } + final var leaderCleanupJobs = leaderCleanupJobsTmp; + final var followerCleanupJobs = followerCleanupJobsTmp; FieldUtils.writeDeclaredField(leaderChannel, "cleanupJobs", leaderCleanupJobs, true); FieldUtils.writeDeclaredField(followerChannel, "cleanupJobs", followerCleanupJobs, @@ -734,22 +720,24 @@ public void handleBrokerDeletionEventTest() var owner1 = channel1.getOwnerAsync(bundle1); var owner2 = channel2.getOwnerAsync(bundle2); - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) - .when(loadManager).selectAsync(any()); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) + .when(loadManager).selectAsync(any(), any()); assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - String broker = lookupServiceAddress1; + String broker = brokerId1; channel1.publishAssignEventAsync(bundle1, broker); channel2.publishAssignEventAsync(bundle2, broker); + waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); waitUntilNewOwner(channel2, bundle2, broker); - channel1.publishUnloadEventAsync(new Unload(broker, bundle1, Optional.of(lookupServiceAddress2))); - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); + // Verify to transfer the ownership to the other broker. 
+ channel1.publishUnloadEventAsync(new Unload(broker, bundle1, Optional.of(brokerId2))); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); // test stable metadata state leaderChannel.handleMetadataSessionEvent(SessionReestablished); @@ -760,31 +748,35 @@ public void handleBrokerDeletionEventTest() System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); followerChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + leaderChannel.handleBrokerRegistrationEvent(brokerId2, NotificationType.Deleted); + followerChannel.handleBrokerRegistrationEvent(brokerId2, NotificationType.Deleted); - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle2, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle2, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); + waitUntilNewOwner(channel1, bundle2, brokerId2); + waitUntilNewOwner(channel2, bundle2, brokerId2); verify(leaderCleanupJobs, times(1)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(0, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); - assertEquals(0, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 1, + 2, 0, 0); // test jittery metadata state - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle1, Optional.of(broker))); - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle2, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle1, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle2, Optional.of(broker))); waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); @@ -797,14 +789,18 @@ public void handleBrokerDeletionEventTest() verify(leaderCleanupJobs, times(2)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); - assertEquals(1, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); + + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(1, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); + validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 2, + 3, 0, 0); @@ -814,14 +810,18 @@ public void handleBrokerDeletionEventTest() verify(leaderCleanupJobs, times(2)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); - assertEquals(0, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); + + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(0, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); + validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 2, + 3, 0, 1); @@ -833,39 +833,45 @@ public void handleBrokerDeletionEventTest() verify(leaderCleanupJobs, times(3)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, 
times(0)).computeIfAbsent(eq(broker), any()); - assertEquals(1, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(1, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); + validateMonitorCounters(leaderChannel, - 1, - 0, - 1, + 2, 0, 3, 0, + 4, + 0, 1); // finally cleanup - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle2, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle2, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); + waitUntilNewOwner(channel1, bundle2, brokerId2); + waitUntilNewOwner(channel2, bundle2, brokerId2); verify(leaderCleanupJobs, times(3)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); - assertEquals(0, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(0, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); + validateMonitorCounters(leaderChannel, - 2, - 0, 3, 0, - 3, + 5, + 0, + 4, 0, 1); // test unstable state - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle1, Optional.of(broker))); - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle2, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle1, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle2, Optional.of(broker))); waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); @@ -878,14 +884,17 @@ public void handleBrokerDeletionEventTest() verify(leaderCleanupJobs, times(3)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); - assertEquals(0, leaderCleanupJobs.size()); - assertEquals(0, followerCleanupJobs.size()); + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(0, leaderCleanupJobs.size()); + assertEquals(0, followerCleanupJobs.size()); + }); + validateMonitorCounters(leaderChannel, - 2, - 0, 3, 0, - 3, + 5, + 0, + 4, 1, 1); @@ -898,25 +907,24 @@ public void handleBrokerDeletionEventTest() } @Test(priority = 10) - public void conflictAndCompactionTest() throws ExecutionException, InterruptedException, TimeoutException, - IllegalAccessException, PulsarClientException, PulsarServerException { + public void conflictAndCompactionTest() throws Exception { String bundle = String.format("%s/%s", "public/default", "0x0000000a_0xffffffff"); var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var assigned1 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + var assigned1 = channel1.publishAssignEventAsync(bundle, brokerId1); assertNotNull(assigned1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); String assignedAddr1 = assigned1.get(5, TimeUnit.SECONDS); - assertEquals(lookupServiceAddress1, assignedAddr1); + 
assertEquals(brokerId1, assignedAddr1); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, true); - var assigned2 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var assigned2 = channel2.publishAssignEventAsync(bundle, brokerId2); assertNotNull(assigned2); Exception ex = null; try { @@ -924,35 +932,49 @@ public void conflictAndCompactionTest() throws ExecutionException, InterruptedEx } catch (CompletionException e) { ex = e; } - assertNotNull(ex); - assertEquals(TimeoutException.class, ex.getCause().getClass()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(bundle).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(bundle).get()); + assertNull(ex); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(bundle).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(bundle).get()); var compactor = spy (pulsar1.getStrategicCompactor()); Field strategicCompactorField = FieldUtils.getDeclaredField(PulsarService.class, "strategicCompactor", true); FieldUtils.writeField(strategicCompactorField, pulsar1, compactor, true); FieldUtils.writeField(strategicCompactorField, pulsar2, compactor, true); - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(140, TimeUnit.SECONDS) - .untilAsserted(() -> { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - verify(compactor, times(1)) - .compact(eq(ServiceUnitStateChannelImpl.TOPIC), any()); - }); + + var threshold = admin.topicPolicies() + .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC); + admin.topicPolicies() + .setCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, 0); + + try { + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(140, TimeUnit.SECONDS) + .untilAsserted(() -> { + channel1.publishAssignEventAsync(bundle, brokerId1); + verify(compactor, times(1)) + .compact(eq(ServiceUnitStateChannelImpl.TOPIC), any()); + }); + + + var channel3 = createChannel(pulsar); + channel3.start(); + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(5, TimeUnit.SECONDS) + .untilAsserted(() -> assertEquals( + channel3.getOwnerAsync(bundle).get(), Optional.of(brokerId1))); + channel3.close(); + } finally { + FieldUtils.writeDeclaredField(channel2, + "inFlightStateWaitingTimeInMillis", 30 * 1000, true); + if (threshold != null) { + admin.topicPolicies() + .setCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, threshold); + } + } - var channel3 = createChannel(pulsar); - channel3.start(); - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertEquals( - channel3.getOwnerAsync(bundle).get(), Optional.of(lookupServiceAddress1))); - channel3.close(); - FieldUtils.writeDeclaredField(channel2, - "inFlightStateWaitingTimeInMillis", 30 * 1000, true); } @Test(priority = 11) @@ -994,16 +1016,16 @@ public void ownerLookupCountTests() throws IllegalAccessException { public void unloadTest() throws ExecutionException, InterruptedException, IllegalAccessException { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = 
channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); @@ -1015,17 +1037,17 @@ public void unloadTest() assertEquals(Optional.empty(), owner1.get()); assertEquals(Optional.empty(), owner2.get()); - channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + channel2.publishAssignEventAsync(bundle, brokerId2); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); ownerAddr1 = channel1.getOwnerAsync(bundle).get(); ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); - Unload unload2 = new Unload(lookupServiceAddress2, bundle, Optional.empty()); + assertEquals(ownerAddr1, Optional.of(brokerId2)); + Unload unload2 = new Unload(brokerId2, bundle, Optional.empty()); channel2.publishUnloadEventAsync(unload2); @@ -1044,9 +1066,9 @@ public void unloadTest() "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle, Init); waitUntilState(channel2, bundle, Init); @@ -1076,7 +1098,7 @@ public void unloadTest() public void assignTestWhenDestBrokerProducerFails() throws ExecutionException, InterruptedException, IllegalAccessException { - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); @@ -1100,22 +1122,20 @@ public void assignTestWhenDestBrokerProducerFails() "inFlightStateWaitingTimeInMillis", 3 * 1000, true); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, true); - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) - .when(loadManager).selectAsync(any()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress2); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) + .when(loadManager).selectAsync(any(), any()); + channel1.publishAssignEventAsync(bundle, brokerId2); // channel1 is broken. the assign won't be complete. waitUntilState(channel1, bundle); waitUntilState(channel2, bundle); var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); - assertFalse(owner1.isDone()); + assertTrue(owner1.isDone()); assertFalse(owner2.isDone()); - // In 5 secs, the getOwnerAsync requests(lookup requests) should time out. - Awaitility.await().atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertTrue(owner1.isCompletedExceptionally())); - Awaitility.await().atMost(5, TimeUnit.SECONDS) + // In 10 secs, the getOwnerAsync requests(lookup requests) should time out. 
+ Awaitility.await().atMost(10, TimeUnit.SECONDS) .untilAsserted(() -> assertTrue(owner2.isCompletedExceptionally())); // recovered, check the monitor update state : Assigned -> Owned @@ -1126,18 +1146,18 @@ public void assignTestWhenDestBrokerProducerFails() "inFlightStateWaitingTimeInMillis", 1 , true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); + assertEquals(ownerAddr1, Optional.of(brokerId2)); var leader = channel1.isChannelOwnerAsync().get() ? channel1 : channel2; validateMonitorCounters(leader, @@ -1161,20 +1181,20 @@ public void splitTestWhenProducerFails() throws ExecutionException, InterruptedException, IllegalAccessException { - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); waitUntilState(channel1, bundle, Free); waitUntilState(channel2, bundle, Free); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); waitUntilState(channel1, bundle, Owned); waitUntilState(channel2, bundle, Owned); - assertEquals(lookupServiceAddress1, channel1.getOwnerAsync(bundle).get().get()); - assertEquals(lookupServiceAddress1, channel2.getOwnerAsync(bundle).get().get()); + assertEquals(brokerId1, channel1.getOwnerAsync(bundle).get().get()); + assertEquals(brokerId1, channel2.getOwnerAsync(bundle).get().get()); var producer = (Producer) FieldUtils.readDeclaredField(channel1, "producer", true); @@ -1193,7 +1213,7 @@ public void splitTestWhenProducerFails() // Assert child bundle ownerships in the channels. - Split split = new Split(bundle, lookupServiceAddress1, Map.of( + Split split = new Split(bundle, brokerId1, Map.of( childBundle1Range, Optional.empty(), childBundle2Range, Optional.empty())); channel2.publishSplitEventAsync(split); // channel1 is broken. the split won't be complete. 
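// Reviewer sketch (not part of the patch): the *WhenDestBrokerFails / *WhenProducerFails tests in
// this file share one fault-injection pattern: read the channel's private "producer" field, swap
// in a stub whose publish future never completes, run the scenario, then restore the real producer
// and shrink "inFlightStateWaitingTimeInMillis" so the monitor can recover. The stubbing below is
// an assumed minimal version; the real stub lives outside the hunks shown here.
var realProducer = (Producer<ServiceUnitStateData>)
        FieldUtils.readDeclaredField(channel1, "producer", true);
var hangingProducer = spy(realProducer);
// Assumption: the channel publishes via sendAsync; a never-completing future makes every publish
// hang, so the in-flight state cannot be persisted.
doReturn(new CompletableFuture<MessageId>()).when(hangingProducer).sendAsync(any());
FieldUtils.writeDeclaredField(channel1, "producer", hangingProducer, true);
try {
    // ... publish an unload/assign/split event and assert that it does not complete ...
} finally {
    FieldUtils.writeDeclaredField(channel1, "producer", realProducer, true);
}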
@@ -1240,13 +1260,13 @@ public void testIsOwner() throws IllegalAccessException { assertFalse(owner1); assertFalse(owner2); - owner1 = channel1.isOwner(bundle, lookupServiceAddress2); - owner2 = channel2.isOwner(bundle, lookupServiceAddress1); + owner1 = channel1.isOwner(bundle, brokerId2); + owner2 = channel2.isOwner(bundle, brokerId1); assertFalse(owner1); assertFalse(owner2); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); owner2 = channel2.isOwner(bundle); assertFalse(owner2); @@ -1259,49 +1279,111 @@ public void testIsOwner() throws IllegalAccessException { assertTrue(owner1); assertFalse(owner2); - owner1 = channel1.isOwner(bundle, lookupServiceAddress1); - owner2 = channel2.isOwner(bundle, lookupServiceAddress2); + owner1 = channel1.isOwner(bundle, brokerId1); + owner2 = channel2.isOwner(bundle, brokerId2); assertTrue(owner1); assertFalse(owner2); - owner1 = channel2.isOwner(bundle, lookupServiceAddress1); - owner2 = channel1.isOwner(bundle, lookupServiceAddress2); + owner1 = channel2.isOwner(bundle, brokerId1); + owner2 = channel1.isOwner(bundle, brokerId2); assertTrue(owner1); assertFalse(owner2); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId1, 1)); assertTrue(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId1, 1)); assertTrue(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); overrideTableView(channel1, bundle, null); assertFalse(channel1.isOwner(bundle)); } + @Test(priority = 15) + public void testGetOwnerAsync() throws Exception { + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId1, 1)); + var owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId1, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(!owner.isDone()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, 
owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(!owner.isDone()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId1, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertTrue(owner.isCompletedExceptionally()); + + overrideTableView(channel1, bundle, null); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + } + @Test(priority = 16) public void splitAndRetryFailureTest() throws Exception { - channel1.publishAssignEventAsync(bundle3, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle3, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle3, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle3, brokerId1); + waitUntilNewOwner(channel1, bundle3, brokerId1); + waitUntilNewOwner(channel2, bundle3, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle3).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle3).get(); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - assertEquals(ownerAddr2, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + assertEquals(ownerAddr2, Optional.of(brokerId1)); assertTrue(ownerAddr1.isPresent()); NamespaceService namespaceService = spy(pulsar1.getNamespaceService()); @@ -1342,7 +1424,7 @@ public void splitAndRetryFailureTest() throws Exception { }); var leader = channel1.isChannelOwnerAsync().get() ? 
channel1 : channel2; ((ServiceUnitStateChannelImpl) leader) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); waitUntilState(leader, bundle3, Deleted); waitUntilState(channel1, bundle3, Deleted); waitUntilState(channel2, bundle3, Deleted); @@ -1353,14 +1435,14 @@ public void splitAndRetryFailureTest() throws Exception { validateEventCounters(channel1, 1, 0, 1, 0, 0, 0); validateEventCounters(channel2, 0, 0, 0, 0, 0, 0); - waitUntilNewOwner(channel1, childBundle31, lookupServiceAddress1); - waitUntilNewOwner(channel1, childBundle32, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle31, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle32, lookupServiceAddress1); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle31).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle32).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle31).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle32).get()); + waitUntilNewOwner(channel1, childBundle31, brokerId1); + waitUntilNewOwner(channel1, childBundle32, brokerId1); + waitUntilNewOwner(channel2, childBundle31, brokerId1); + waitUntilNewOwner(channel2, childBundle32, brokerId1); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle31).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle32).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle31).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle32).get()); // try the monitor and check the monitor moves `Deleted` -> `Init` @@ -1371,9 +1453,9 @@ public void splitAndRetryFailureTest() throws Exception { "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle3, Init); waitUntilState(channel2, bundle3, Init); @@ -1409,12 +1491,12 @@ public void testOverrideInactiveBrokerStateData() String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; } - String broker = lookupServiceAddress1; + String broker = brokerId1; // test override states String releasingBundle = "public/releasing/0xfffffff0_0xffffffff"; @@ -1439,8 +1521,8 @@ public void testOverrideInactiveBrokerStateData() new ServiceUnitStateData(Owned, broker, null, 1)); // test stable metadata state - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) - .when(loadManager).selectAsync(any()); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) + .when(loadManager).selectAsync(any(), any()); leaderChannel.handleMetadataSessionEvent(SessionReestablished); followerChannel.handleMetadataSessionEvent(SessionReestablished); FieldUtils.writeDeclaredField(leaderChannel, "lastMetadataSessionEventTimestamp", @@ -1450,11 +1532,11 @@ public void 
testOverrideInactiveBrokerStateData() leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); followerChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); - waitUntilNewOwner(channel2, releasingBundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, childBundle11, lookupServiceAddress2); - waitUntilNewOwner(channel2, childBundle12, lookupServiceAddress2); - waitUntilNewOwner(channel2, assigningBundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, ownedBundle, lookupServiceAddress2); + waitUntilNewOwner(channel2, releasingBundle, brokerId2); + waitUntilNewOwner(channel2, childBundle11, brokerId2); + waitUntilNewOwner(channel2, childBundle12, brokerId2); + waitUntilNewOwner(channel2, assigningBundle, brokerId2); + waitUntilNewOwner(channel2, ownedBundle, brokerId2); assertEquals(Optional.empty(), channel2.getOwnerAsync(freeBundle).get()); assertTrue(channel2.getOwnerAsync(deletedBundle).isCompletedExceptionally()); assertTrue(channel2.getOwnerAsync(splittingBundle).isCompletedExceptionally()); @@ -1474,12 +1556,12 @@ public void testOverrideOrphanStateData() String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; } - String broker = lookupServiceAddress1; + String broker = brokerId1; // test override states String releasingBundle = "public/releasing/0xfffffff0_0xffffffff"; @@ -1504,19 +1586,19 @@ public void testOverrideOrphanStateData() new ServiceUnitStateData(Owned, broker, null, 1)); // test stable metadata state - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) - .when(loadManager).selectAsync(any()); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) + .when(loadManager).selectAsync(any(), any()); FieldUtils.writeDeclaredField(leaderChannel, "inFlightStateWaitingTimeInMillis", -1, true); FieldUtils.writeDeclaredField(followerChannel, "inFlightStateWaitingTimeInMillis", -1, true); ((ServiceUnitStateChannelImpl) leaderChannel) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); waitUntilNewOwner(channel2, releasingBundle, broker); waitUntilNewOwner(channel2, childBundle11, broker); waitUntilNewOwner(channel2, childBundle12, broker); - waitUntilNewOwner(channel2, assigningBundle, lookupServiceAddress2); + waitUntilNewOwner(channel2, assigningBundle, brokerId2); waitUntilNewOwner(channel2, ownedBundle, broker); assertEquals(Optional.empty(), channel2.getOwnerAsync(freeBundle).get()); assertTrue(channel2.getOwnerAsync(deletedBundle).isCompletedExceptionally()); @@ -1530,10 +1612,84 @@ public void testOverrideOrphanStateData() cleanTableViews(); } + @Test(priority = 19) + public void testActiveGetOwner() throws Exception { + + + // set the bundle owner is the broker + String broker = brokerId2; + String bundle = "public/owned/0xfffffff0_0xffffffff"; + overrideTableViews(bundle, + new ServiceUnitStateData(Owned, broker, null, 1)); + var owner = channel1.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS).get(); + assertEquals(owner, broker); + + // simulate the owner is inactive + var spyRegistry = spy(new BrokerRegistryImpl(pulsar)); + doReturn(CompletableFuture.completedFuture(Optional.empty())) + .when(spyRegistry).lookupAsync(eq(broker)); + 
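The spy above makes the recorded owner look inactive to channel1, and the assertions that follow show `getOwnerAsync` only failing once `inFlightStateWaitingTimeInMillis` has elapsed. As a caller-side illustration only (not part of this patch), the wait can be bounded with the JDK's `CompletableFuture.orTimeout`; the fallback-to-empty policy here is an assumption made for the sketch:

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;
    import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannel;

    class BoundedOwnerLookupSketch {
        // Cap how long a caller waits for ownership resolution; treat a timeout or an
        // inactive-owner failure as "no owner known" instead of propagating the error.
        static CompletableFuture<Optional<String>> boundedOwner(
                ServiceUnitStateChannel channel, String bundle, long timeoutMs) {
            return channel.getOwnerAsync(bundle)
                    .orTimeout(timeoutMs, TimeUnit.MILLISECONDS)
                    .exceptionally(ex -> Optional.empty());
        }
    }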
FieldUtils.writeDeclaredField(channel1, + "brokerRegistry", spyRegistry , true); + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 1000, true); + + + // verify getOwnerAsync times out because the owner is inactive now. + long start = System.currentTimeMillis(); + var ex = expectThrows(ExecutionException.class, () -> channel1.getOwnerAsync(bundle).get()); + assertTrue(ex.getCause() instanceof IllegalStateException); + assertTrue(System.currentTimeMillis() - start >= 1000); + + // simulate ownership cleanup(no selected owner) by the leader channel + doReturn(CompletableFuture.completedFuture(Optional.empty())) + .when(loadManager).selectAsync(any(), any()); + var leaderChannel = channel1; + String leader1 = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); + String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); + assertEquals(leader1, leader2); + if (leader1.equals(brokerId2)) { + leaderChannel = channel2; + } + leaderChannel.handleMetadataSessionEvent(SessionReestablished); + FieldUtils.writeDeclaredField(leaderChannel, "lastMetadataSessionEventTimestamp", + System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); + leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + + // verify the ownership cleanup, and channel's getOwnerAsync returns empty result without timeout + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 20 * 1000, true); + start = System.currentTimeMillis(); + assertTrue(channel1.getOwnerAsync(bundle).get().isEmpty()); + assertTrue(System.currentTimeMillis() - start < 20_000); + + // simulate ownership cleanup(brokerId1 selected owner) by the leader channel + overrideTableViews(bundle, + new ServiceUnitStateData(Owned, broker, null, 1)); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId1))) + .when(loadManager).selectAsync(any(), any()); + leaderChannel.handleMetadataSessionEvent(SessionReestablished); + FieldUtils.writeDeclaredField(leaderChannel, "lastMetadataSessionEventTimestamp", + System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); + getCleanupJobs(leaderChannel).clear(); + leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + + // verify the ownership cleanup, and channel's getOwnerAsync returns brokerId1 without timeout + start = System.currentTimeMillis(); + assertEquals(brokerId1, channel1.getOwnerAsync(bundle).get().get()); + assertTrue(System.currentTimeMillis() - start < 20_000); + + // test clean-up + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 30 * 1000, true); + FieldUtils.writeDeclaredField(channel1, + "brokerRegistry", registry , true); + cleanTableViews(); + + } - private static ConcurrentOpenHashMap>> getOwnerRequests( + private static ConcurrentHashMap>> getOwnerRequests( ServiceUnitStateChannel channel) throws IllegalAccessException { - return (ConcurrentOpenHashMap>>) + return (ConcurrentHashMap>>) FieldUtils.readDeclaredField(channel, "getOwnerRequests", true); } @@ -1550,9 +1706,9 @@ private static long getLastMetadataSessionEventTimestamp(ServiceUnitStateChannel FieldUtils.readField(channel, "lastMetadataSessionEventTimestamp", true); } - private static ConcurrentOpenHashMap> getCleanupJobs( + private static ConcurrentHashMap> getCleanupJobs( ServiceUnitStateChannel channel) throws IllegalAccessException { - return (ConcurrentOpenHashMap>) + return (ConcurrentHashMap>) 
FieldUtils.readField(channel, "cleanupJobs", true); } @@ -1641,7 +1797,7 @@ private void waitUntilStateWithMonitor(ServiceUnitStateChannel channel, String k .atMost(10, TimeUnit.SECONDS) .until(() -> { // wait until true ((ServiceUnitStateChannelImpl) channel) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); ServiceUnitStateData data = tv.get(key); ServiceUnitState actual = state(data); return actual == expected; @@ -1677,10 +1833,13 @@ private void overrideTableViews(String serviceUnit, ServiceUnitStateData val) th overrideTableView(channel2, serviceUnit, val); } - private static void overrideTableView(ServiceUnitStateChannel channel, String serviceUnit, ServiceUnitStateData val) + @Test(enabled = false) + public static void overrideTableView(ServiceUnitStateChannel channel, String serviceUnit, ServiceUnitStateData val) throws IllegalAccessException { var tv = (TableViewImpl) FieldUtils.readField(channel, "tableview", true); + var getOwnerRequests = (Map>) + FieldUtils.readField(channel, "getOwnerRequests", true); var cache = (ConcurrentMap) FieldUtils.readField(tv, "data", true); if(val == null){ @@ -1688,6 +1847,7 @@ private static void overrideTableView(ServiceUnitStateChannel channel, String se } else { cache.put(serviceUnit, val); } + getOwnerRequests.clear(); } private static void cleanOpsCounters(ServiceUnitStateChannel channel) @@ -1863,7 +2023,7 @@ ServiceUnitStateChannelImpl createChannel(PulsarService pulsar) var leaderElectionService = new LeaderElectionService( - pulsar.getCoordinationService(), pulsar.getSafeWebServiceAddress(), + pulsar.getCoordinationService(), pulsar.getBrokerId(), pulsar.getSafeWebServiceAddress(), state -> { if (state == LeaderElectionState.Leading) { channel.scheduleOwnershipMonitor(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategyTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategyTest.java index 64964826af652..62de91dab292b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategyTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateCompactionStrategyTest.java @@ -85,6 +85,10 @@ public void testVersionId(){ new ServiceUnitStateData(Owned, dst, src, 10), new ServiceUnitStateData(Releasing, "broker2", dst, 5))); + assertFalse(strategy.shouldKeepLeft( + new ServiceUnitStateData(Owned, dst, src, 10), + new ServiceUnitStateData(Owned, "broker2", dst, 12))); + } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java index 68bd7b29094cd..a120ef473e9a5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java @@ -90,10 +90,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() 
throws LoadDataStoreException { + + } }; configuration.setPreferLaterVersions(true); doReturn(configuration).when(mockContext).brokerConfiguration(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java index c2c534f72e9db..87aaf4bac7fae 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java @@ -31,6 +31,9 @@ import java.util.HashMap; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.BrokerFilterException; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; @@ -67,7 +70,7 @@ public class BrokerIsolationPoliciesFilterTest { */ @Test public void testFilterWithNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() - throws IllegalAccessException, BrokerFilterException { + throws IllegalAccessException, BrokerFilterException, ExecutionException, InterruptedException { var namespace = "my-tenant/my-ns"; NamespaceName namespaceName = NamespaceName.get(namespace); @@ -80,48 +83,48 @@ public void testFilterWithNamespaceIsolationPoliciesForPrimaryAndSecondaryBroker BrokerIsolationPoliciesFilter filter = new BrokerIsolationPoliciesFilter(isolationPoliciesHelper); // a. available-brokers: broker1, broker2, broker3 => result: broker1 - Map result = filter.filter(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()); - assertEquals(result.keySet(), Set.of("broker1")); + Map result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080")); // b. available-brokers: broker2, broker3 => result: broker2 - result = filter.filter(new HashMap<>(Map.of( - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()); - assertEquals(result.keySet(), Set.of("broker2")); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080")); // c. available-brokers: broker3 => result: NULL - result = filter.filter(new HashMap<>(Map.of( - "broker3", getLookupData())), namespaceName, getContext()); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); assertTrue(result.isEmpty()); // 2. Namespace: primary=broker1, secondary=broker2, shared=broker3, min_limit = 2 setIsolationPolicies(policies, namespaceName, Set.of("broker1"), Set.of("broker2"), Set.of("broker3"), 2); // a. 
available-brokers: broker1, broker2, broker3 => result: broker1, broker2 - result = filter.filter(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()); - assertEquals(result.keySet(), Set.of("broker1", "broker2")); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080")); // b. available-brokers: broker2, broker3 => result: broker2 - result = filter.filter(new HashMap<>(Map.of( - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()); - assertEquals(result.keySet(), Set.of("broker2")); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080")); // c. available-brokers: broker3 => result: NULL - result = filter.filter(new HashMap<>(Map.of( - "broker3", getLookupData())), namespaceName, getContext()); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); assertTrue(result.isEmpty()); } @Test public void testFilterWithPersistentOrNonPersistentDisabled() - throws IllegalAccessException, BrokerFilterException { + throws IllegalAccessException, BrokerFilterException, ExecutionException, InterruptedException { var namespace = "my-tenant/my-ns"; NamespaceName namespaceName = NamespaceName.get(namespace); NamespaceBundle namespaceBundle = mock(NamespaceBundle.class); @@ -129,7 +132,8 @@ public void testFilterWithPersistentOrNonPersistentDisabled() doReturn(namespaceName).when(namespaceBundle).getNamespaceObject(); var policies = mock(SimpleResourceAllocationPolicies.class); - doReturn(false).when(policies).areIsolationPoliciesPresent(eq(namespaceName)); + doReturn(CompletableFuture.completedFuture(false)) + .when(policies).areIsolationPoliciesPresentAsync(eq(namespaceName)); doReturn(true).when(policies).isSharedBroker(any()); IsolationPoliciesHelper isolationPoliciesHelper = new IsolationPoliciesHelper(policies); @@ -137,32 +141,32 @@ public void testFilterWithPersistentOrNonPersistentDisabled() - Map result = filter.filter(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceBundle, getContext()); - assertEquals(result.keySet(), Set.of("broker1", "broker2", "broker3")); + Map result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080", "broker3:8080")); - result = filter.filter(new HashMap<>(Map.of( - "broker1", getLookupData(true, false), - "broker2", getLookupData(true, false), - "broker3", getLookupData())), namespaceBundle, getContext()); - assertEquals(result.keySet(), Set.of("broker3")); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(true, false), + "broker2:8080", getLookupData(true, false), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker3:8080")); doReturn(false).when(namespaceBundle).hasNonPersistentTopic(); - result = filter.filter(new HashMap<>(Map.of( - 
"broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceBundle, getContext()); - assertEquals(result.keySet(), Set.of("broker1", "broker2", "broker3")); - - result = filter.filter(new HashMap<>(Map.of( - "broker1", getLookupData(false, true), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceBundle, getContext()); - assertEquals(result.keySet(), Set.of("broker2", "broker3")); + result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080", "broker3:8080")); + + result = filter.filterAsync(new HashMap<>(Map.of( + "broker1:8080", getLookupData(false, true), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080", "broker3:8080")); } private void setIsolationPolicies(SimpleResourceAllocationPolicies policies, @@ -172,7 +176,8 @@ private void setIsolationPolicies(SimpleResourceAllocationPolicies policies, Set shared, int min_limit) { reset(policies); - doReturn(true).when(policies).areIsolationPoliciesPresent(eq(namespaceName)); + doReturn(CompletableFuture.completedFuture(true)) + .when(policies).areIsolationPoliciesPresentAsync(eq(namespaceName)); doReturn(false).when(policies).isPrimaryBroker(eq(namespaceName), any()); doReturn(false).when(policies).isSecondaryBroker(eq(namespaceName), any()); doReturn(false).when(policies).isSharedBroker(any()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilterTest.java index 0169b57fe993e..17475b419576f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerLoadManagerClassFilterTest.java @@ -27,6 +27,7 @@ import org.testng.annotations.Test; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ExecutionException; /** @@ -35,7 +36,7 @@ public class BrokerLoadManagerClassFilterTest extends BrokerFilterTestBase { @Test - public void test() throws BrokerFilterException { + public void test() throws BrokerFilterException, ExecutionException, InterruptedException { LoadManagerContext context = getContext(); context.brokerConfiguration().setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); BrokerLoadManagerClassFilter filter = new BrokerLoadManagerClassFilter(); @@ -44,17 +45,18 @@ public void test() throws BrokerFilterException { "broker1", getLookupData("3.0.0", ExtensibleLoadManagerImpl.class.getName()), "broker2", getLookupData("3.0.0", ExtensibleLoadManagerImpl.class.getName()), "broker3", getLookupData("3.0.0", ModularLoadManagerImpl.class.getName()), - "broker4", getLookupData("3.0.0", ModularLoadManagerImpl.class.getName()) + "broker4", getLookupData("3.0.0", ModularLoadManagerImpl.class.getName()), + "broker5", getLookupData("3.0.0", null) ); - Map result = filter.filter(new HashMap<>(originalBrokers), null, context); + Map result = filter.filterAsync(new HashMap<>(originalBrokers), null, context).get(); assertEquals(result, Map.of( "broker1", getLookupData("3.0.0", 
ExtensibleLoadManagerImpl.class.getName()), "broker2", getLookupData("3.0.0", ExtensibleLoadManagerImpl.class.getName()) )); context.brokerConfiguration().setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); - result = filter.filter(new HashMap<>(originalBrokers), null, context); + result = filter.filterAsync(new HashMap<>(originalBrokers), null, context).get(); assertEquals(result, Map.of( "broker3", getLookupData("3.0.0", ModularLoadManagerImpl.class.getName()), diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilterTest.java index da13a9526a881..9d000cb91a155 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerMaxTopicCountFilterTest.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ExecutionException; import static org.testng.Assert.assertEquals; @@ -38,7 +39,7 @@ public class BrokerMaxTopicCountFilterTest extends BrokerFilterTestBase { @Test - public void test() throws IllegalAccessException, BrokerFilterException { + public void test() throws IllegalAccessException, BrokerFilterException, ExecutionException, InterruptedException { LoadManagerContext context = getContext(); LoadDataStore store = context.brokerLoadDataStore(); BrokerLoadData maxTopicLoadData = new BrokerLoadData(); @@ -58,7 +59,8 @@ public void test() throws IllegalAccessException, BrokerFilterException { "broker3", getLookupData(), "broker4", getLookupData() ); - Map result = filter.filter(new HashMap<>(originalBrokers), null, context); + Map result = + filter.filterAsync(new HashMap<>(originalBrokers), null, context).get(); assertEquals(result, Map.of( "broker2", getLookupData(), "broker4", getLookupData() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilterTest.java index cafd8f0ea7a4c..2aa7faeb599fd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerVersionFilterTest.java @@ -21,9 +21,11 @@ import static org.mockito.Mockito.doReturn; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ExecutionException; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.BrokerFilterBadVersionException; import org.apache.pulsar.broker.loadbalance.BrokerFilterException; @@ -39,14 +41,14 @@ public class BrokerVersionFilterTest extends BrokerFilterTestBase { @Test - public void testFilterEmptyBrokerList() throws BrokerFilterException { + public void testFilterEmptyBrokerList() throws BrokerFilterException, ExecutionException, InterruptedException { BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); - Map result = brokerVersionFilter.filter(new HashMap<>(), null, getContext()); + Map result = brokerVersionFilter.filterAsync(new HashMap<>(), null, getContext()).get(); 
assertTrue(result.isEmpty()); } @Test - public void testDisabledFilter() throws BrokerFilterException { + public void testDisabledFilter() throws BrokerFilterException, ExecutionException, InterruptedException { LoadManagerContext context = getContext(); ServiceConfiguration configuration = new ServiceConfiguration(); configuration.setPreferLaterVersions(false); @@ -58,12 +60,12 @@ public void testDisabledFilter() throws BrokerFilterException { ); Map brokers = new HashMap<>(originalBrokers); BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); - Map result = brokerVersionFilter.filter(brokers, null, context); + Map result = brokerVersionFilter.filterAsync(brokers, null, context).get(); assertEquals(result, originalBrokers); } @Test - public void testFilter() throws BrokerFilterException { + public void testFilter() throws BrokerFilterException, ExecutionException, InterruptedException { Map originalBrokers = Map.of( "localhost:6650", getLookupData("2.10.0"), "localhost:6651", getLookupData("2.10.1"), @@ -71,8 +73,8 @@ public void testFilter() throws BrokerFilterException { "localhost:6653", getLookupData("2.10.1") ); BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); - Map result = brokerVersionFilter.filter( - new HashMap<>(originalBrokers), null, getContext()); + Map result = brokerVersionFilter.filterAsync( + new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6651", getLookupData("2.10.1"), "localhost:6652", getLookupData("2.10.1"), @@ -85,7 +87,7 @@ public void testFilter() throws BrokerFilterException { "localhost:6652", getLookupData("2.10.1"), "localhost:6653", getLookupData("2.10.1") ); - result = brokerVersionFilter.filter(new HashMap<>(originalBrokers), null, getContext()); + result = brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6652", getLookupData("2.10.1"), @@ -99,19 +101,24 @@ public void testFilter() throws BrokerFilterException { "localhost:6653", getLookupData("2.10.2-SNAPSHOT") ); - result = brokerVersionFilter.filter(new HashMap<>(originalBrokers), null, getContext()); + result = brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get(); assertEquals(result, Map.of( "localhost:6653", getLookupData("2.10.2-SNAPSHOT") )); } - @Test(expectedExceptions = BrokerFilterBadVersionException.class) - public void testInvalidVersionString() throws BrokerFilterException { + @Test + public void testInvalidVersionString() { Map originalBrokers = Map.of( "localhost:6650", getLookupData("xxx") ); BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); - brokerVersionFilter.filter(new HashMap<>(originalBrokers), null, getContext()); + try { + brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get(); + fail(); + } catch (Exception ex) { + assertEquals(ex.getCause().getClass(), BrokerFilterBadVersionException.class); + } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManagerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManagerTest.java new file mode 100644 index 0000000000000..cbf77b59d5ad6 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/manager/RedirectManagerTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.loadbalance.extensions.manager; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; + +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; +import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; +import org.apache.pulsar.broker.lookup.LookupResult; +import org.apache.pulsar.policies.data.loadbalancer.AdvertisedListener; +import org.testng.annotations.Test; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + + +/** + * Unit test {@link RedirectManager}. + */ +public class RedirectManagerTest { + + @Test + public void testFindRedirectLookupResultAsync() throws ExecutionException, InterruptedException { + PulsarService pulsar = mock(PulsarService.class); + ServiceConfiguration configuration = new ServiceConfiguration(); + when(pulsar.getConfiguration()).thenReturn(configuration); + RedirectManager redirectManager = spy(new RedirectManager(pulsar, null)); + + configuration.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); + configuration.setLoadBalancerDebugModeEnabled(true); + + // Test 1: No load manager class name found. + doReturn(CompletableFuture.completedFuture( + new HashMap<>(){{ + put("broker-1", getLookupData("broker-1", null, 10)); + put("broker-2", getLookupData("broker-2", ModularLoadManagerImpl.class.getName(), 1)); + }} + )).when(redirectManager).getAvailableBrokerLookupDataAsync(); + + // Should redirect to broker-1, since broker-1 has the latest load manager, even though the class name is null. 
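Test 1 above (its assertions continue just below) pins down the redirect rule these tests expect: among the available brokers, the one with the latest start timestamp determines which load manager generation is current, and a redirect is issued when that differs from the locally configured load manager, even if its advertised class name is null. A sketch of that decision, inferred purely from the test expectations; it is not the RedirectManager implementation, and the record and method names are illustrative:

    import java.util.Collection;
    import java.util.Comparator;

    class RedirectDecisionSketch {
        // Illustrative stand-in for the BrokerLookupData fields the decision relies on.
        record LookupInfo(String broker, String loadManagerClassName, long startTimestamp) {}

        // Redirect when the most recently started broker advertises a different load manager
        // class than the local configuration (a null advertised class counts as different).
        static boolean shouldRedirect(Collection<LookupInfo> available, String localLoadManagerClass) {
            return available.stream()
                    .max(Comparator.comparingLong(LookupInfo::startTimestamp))
                    .map(latest -> !localLoadManagerClass.equals(latest.loadManagerClassName()))
                    .orElse(false);
        }
    }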
+ Optional lookupResult = redirectManager.findRedirectLookupResultAsync().get(); + assertTrue(lookupResult.isPresent()); + assertTrue(lookupResult.get().getLookupData().getBrokerUrl().contains("broker-1")); + + // Test 2: Should redirect to broker-1, since the latest broker are using ExtensibleLoadManagerImpl + doReturn(CompletableFuture.completedFuture( + new HashMap<>(){{ + put("broker-1", getLookupData("broker-1", ExtensibleLoadManagerImpl.class.getName(), 10)); + put("broker-2", getLookupData("broker-2", ModularLoadManagerImpl.class.getName(), 1)); + }} + )).when(redirectManager).getAvailableBrokerLookupDataAsync(); + + lookupResult = redirectManager.findRedirectLookupResultAsync().get(); + assertTrue(lookupResult.isPresent()); + assertTrue(lookupResult.get().getLookupData().getBrokerUrl().contains("broker-1")); + + + // Test 3: Should not redirect, since current broker are using ModularLoadManagerImpl + doReturn(CompletableFuture.completedFuture( + new HashMap<>(){{ + put("broker-1", getLookupData("broker-1", ExtensibleLoadManagerImpl.class.getName(), 10)); + put("broker-2", getLookupData("broker-2", ModularLoadManagerImpl.class.getName(), 100)); + }} + )).when(redirectManager).getAvailableBrokerLookupDataAsync(); + + lookupResult = redirectManager.findRedirectLookupResultAsync().get(); + assertFalse(lookupResult.isPresent()); + } + + + public BrokerLookupData getLookupData(String broker, String loadManagerClassName, long startTimeStamp) { + String webServiceUrl = "http://" + broker + ":8080"; + String webServiceUrlTls = "https://" + broker + ":8081"; + String pulsarServiceUrl = "pulsar://" + broker + ":6650"; + String pulsarServiceUrlTls = "pulsar+ssl://" + broker + ":6651"; + Map advertisedListeners = new HashMap<>(); + Map protocols = new HashMap<>(){{ + put("kafka", "9092"); + }}; + return new BrokerLookupData( + webServiceUrl, webServiceUrlTls, pulsarServiceUrl, + pulsarServiceUrlTls, advertisedListeners, protocols, true, true, + loadManagerClassName, startTimeStamp, "3.0.0"); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java index af5890fcacbb2..0ff64616973d9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java @@ -100,6 +100,7 @@ import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; +import org.assertj.core.api.Assertions; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -168,18 +169,18 @@ public LoadManagerContext setupContext(){ var ctx = getContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 1000000, 3000000)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 2000000, 4000000)); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 2000000, 6000000)); - topBundlesLoadDataStore.pushAsync("broker5", 
getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 7000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 1000000, 3000000)); + topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 2000000, 4000000)); + topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 2000000, 6000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 7000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 2, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 4, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 6, "broker3")); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 80, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 90, "broker5")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 2, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 4, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 6, "broker3:8080")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 80, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 90, "broker5:8080")); return ctx; } @@ -192,9 +193,9 @@ public LoadManagerContext setupContext(int clusterSize) { Random rand = new Random(); for (int i = 0; i < clusterSize; i++) { int brokerLoad = rand.nextInt(1000); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); int bundleLoad = rand.nextInt(brokerLoad + 1); - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, bundleLoad, brokerLoad - bundleLoad)); } return ctx; @@ -209,14 +210,14 @@ public LoadManagerContext setupContextLoadSkewedOverload(int clusterSize) { int i = 0; for (; i < clusterSize-1; i++) { int brokerLoad = 1; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 300_000, 700_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); } int brokerLoad = 100; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); return ctx; } @@ -230,21 +231,21 @@ public LoadManagerContext setupContextLoadSkewedUnderload(int clusterSize) { int i = 0; for (; i < clusterSize-2; i++) { int brokerLoad = 98; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + 
topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); } int brokerLoad = 99; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); i++; brokerLoad = 1; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 300_000, 700_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); return ctx; } @@ -383,10 +384,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; var topBundleLoadDataStore = new LoadDataStore() { @@ -436,19 +452,34 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; BrokerRegistry brokerRegistry = mock(BrokerRegistry.class); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", getMockBrokerLookupData(), - "broker2", getMockBrokerLookupData(), - "broker3", getMockBrokerLookupData(), - "broker4", getMockBrokerLookupData(), - "broker5", getMockBrokerLookupData() + "broker1:8080", getMockBrokerLookupData(), + "broker2:8080", getMockBrokerLookupData(), + "broker3:8080", getMockBrokerLookupData(), + "broker4:8080", getMockBrokerLookupData(), + "broker5:8080", getMockBrokerLookupData() ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); doReturn(conf).when(ctx).brokerConfiguration(); doReturn(brokerLoadDataStore).when(ctx).brokerLoadDataStore(); @@ -496,11 +527,11 @@ public void testEmptyTopBundlesLoadData() { var ctx = getContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 2, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 4, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 6, "broker3")); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 80, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 90, "broker5")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 2, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 4, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 6, "broker3:8080")); + 
brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 80, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 90, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -520,11 +551,11 @@ public void testOutDatedLoadData() throws IllegalAccessException { assertEquals(res.size(), 2); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker1").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker2").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker3").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker4").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker5").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker1:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker2:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker3:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker4:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker5:8080").get(), "updatedAt", 0, true); res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -541,20 +572,20 @@ public void testRecentlyUnloadedBrokers() { Map recentlyUnloadedBrokers = new HashMap<>(); var oldTS = System.currentTimeMillis() - ctx.brokerConfiguration() .getLoadBalancerBrokerLoadDataTTLInSeconds() * 1001; - recentlyUnloadedBrokers.put("broker1", oldTS); + recentlyUnloadedBrokers.put("broker1:8080", oldTS); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), recentlyUnloadedBrokers); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); assertEquals(counter.getLoadStd(), setupLoadStd); var now = System.currentTimeMillis(); - recentlyUnloadedBrokers.put("broker1", now); + recentlyUnloadedBrokers.put("broker1:8080", now); res = transferShedder.findBundlesForUnloading(ctx, Map.of(), recentlyUnloadedBrokers); assertTrue(res.isEmpty()); @@ -574,9 +605,9 @@ public void testRecentlyUnloadedBundles() { recentlyUnloadedBundles.put(bundleD2, now); var res = transferShedder.findBundlesForUnloading(ctx, recentlyUnloadedBundles, Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker3", + expected.add(new UnloadDecision(new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF", - Optional.of("broker1")), + Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -609,13 +640,13 @@ public void testBundlesWithIsolationPolicies() { isolationPoliciesHelper, antiAffinityGroupPolicyHelper)); setIsolationPolicies(allocationPoliciesSpy, "my-tenant/my-namespaceE", - Set.of("broker5"), Set.of(), Set.of(), 1); + Set.of("broker5:8080"), Set.of(), Set.of(), 1); var 
ctx = setupContext(); ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(true); doReturn(ctx.brokerConfiguration()).when(pulsar).getConfiguration(); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -626,7 +657,7 @@ public void testBundlesWithIsolationPolicies() { res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); expected = new HashSet<>(); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.empty()), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.empty()), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -634,7 +665,8 @@ public void testBundlesWithIsolationPolicies() { // test setLoadBalancerSheddingBundlesWithPoliciesEnabled=false; - doReturn(true).when(allocationPoliciesSpy).areIsolationPoliciesPresent(any()); + doReturn(CompletableFuture.completedFuture(true)) + .when(allocationPoliciesSpy).areIsolationPoliciesPresentAsync(any()); ctx.brokerConfiguration().setLoadBalancerTransferEnabled(true); ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(false); res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -679,12 +711,14 @@ private void setIsolationPolicies(SimpleResourceAllocationPolicies policies, NamespaceBundle namespaceBundle = mock(NamespaceBundle.class); doReturn(true).when(namespaceBundle).hasNonPersistentTopic(); doReturn(namespaceName).when(namespaceBundle).getNamespaceObject(); - doReturn(false).when(policies).areIsolationPoliciesPresent(any()); + doReturn(CompletableFuture.completedFuture(false)) + .when(policies).areIsolationPoliciesPresentAsync(any()); doReturn(false).when(policies).isPrimaryBroker(any(), any()); doReturn(false).when(policies).isSecondaryBroker(any(), any()); doReturn(true).when(policies).isSharedBroker(any()); - doReturn(true).when(policies).areIsolationPoliciesPresent(eq(namespaceName)); + doReturn(CompletableFuture.completedFuture(true)) + .when(policies).areIsolationPoliciesPresentAsync(eq(namespaceName)); primary.forEach(broker -> { doReturn(true).when(policies).isPrimaryBroker(eq(namespaceName), eq(broker)); @@ -723,8 +757,8 @@ public void testBundlesWithAntiAffinityGroup() throws MetadataStoreException { doAnswer(invocationOnMock -> { Map brokers = invocationOnMock.getArgument(0); brokers.clear(); - return brokers; - }).when(antiAffinityGroupPolicyHelper).filter(any(), any()); + return CompletableFuture.completedFuture(brokers); + }).when(antiAffinityGroupPolicyHelper).filterAsync(any(), any()); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); @@ -737,14 +771,14 @@ public void testBundlesWithAntiAffinityGroup() throws MetadataStoreException { String bundle = invocationOnMock.getArgument(1, String.class); if (bundle.equalsIgnoreCase(bundleE1)) { - return brokers; + return CompletableFuture.completedFuture(brokers); } brokers.clear(); - return brokers; - }).when(antiAffinityGroupPolicyHelper).filter(any(), any()); + return CompletableFuture.completedFuture(brokers); + }).when(antiAffinityGroupPolicyHelper).filterAsync(any(), any()); var res2 = 
transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected2 = new HashSet<>(); - expected2.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected2.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res2, expected2); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -761,10 +795,10 @@ public String name() { } @Override - public Map filter(Map brokers, - ServiceUnitId serviceUnit, - LoadManagerContext context) throws BrokerFilterException { - throw new BrokerFilterException("test"); + public CompletableFuture> filterAsync(Map brokers, + ServiceUnitId serviceUnit, + LoadManagerContext context) { + return FutureUtil.failedFuture(new BrokerFilterException("test")); } }; filters.add(filter); @@ -820,22 +854,22 @@ public void testTargetStd() { var ctx = getContext(); BrokerRegistry brokerRegistry = mock(BrokerRegistry.class); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class), - "broker3", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class), + "broker3:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); doReturn(brokerRegistry).when(ctx).brokerRegistry(); ctx.brokerConfiguration().setLoadBalancerDebugModeEnabled(true); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 20, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 30, "broker3")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 20, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 30, "broker3:8080")); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 30, 30)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 40, 40)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 50, 50)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 30, 30)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 40, 40)); + topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 50, 50)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -851,11 +885,11 @@ public void testSingleTopBundlesLoadData() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 2)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 6)); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 10)); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 70)); + topBundlesLoadDataStore.pushAsync("broker1:8080", 
getTopBundlesLoad("my-tenant/my-namespaceA", 1)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 2)); + topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 6)); + topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 10)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 70)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); @@ -870,14 +904,14 @@ public void testBundleThroughputLargerThanOffloadThreshold() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 1000000000, 1000000000)); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 1000000000, 1000000000)); + topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 1000000000, 1000000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 1000000000, 1000000000)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker3", + expected.add(new UnloadDecision(new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF", - Optional.of("broker1")), + Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -891,12 +925,12 @@ public void testTargetStdAfterTransfer() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.26400000000000007); @@ -912,43 +946,43 @@ public void testUnloadBundlesGreaterThanTargetThroughput() throws IllegalAccessE var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); - topBundlesLoadDataStore.pushAsync("broker2", + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); + 
topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 100000000, 180000000, 220000000, 250000000, 250000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 1000, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 1000, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x00000000_0x1FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x00000000_0x1FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1", "my-tenant/my-namespaceA/0x00000000_0x1FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080", "my-tenant/my-namespaceA/0x00000000_0x1FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1", "my-tenant/my-namespaceA/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080", "my-tenant/my-namespaceA/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1","my-tenant/my-namespaceA/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080","my-tenant/my-namespaceA/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1","my-tenant/my-namespaceA/0x3FFFFFFF_0x4FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080","my-tenant/my-namespaceA/0x3FFFFFFF_0x4FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 5.05); assertEquals(counter.getLoadStd(), 4.95); @@ -968,20 +1002,20 @@ public void testSkipBundlesGreaterThanTargetThroughputAfterSplit() { var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1, 500000000)); - topBundlesLoadDataStore.pushAsync("broker2", + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 500000000, 500000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 50, 
"broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 100, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 50, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -999,24 +1033,24 @@ public void testUnloadBundlesLessThanTargetThroughputAfterSplit() throws Illegal var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 490000000, 510000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 490000000, 510000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 1000, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 1000, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x00000000_0x0FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x00000000_0x0FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 5.05); assertEquals(counter.getLoadStd(), 4.95); @@ -1037,30 +1071,30 @@ public void testUnloadBundlesGreaterThanTargetThroughputAfterSplit() throws Ille var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 2400000, 2400000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 5000000, 5000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 2400000, 2400000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 5000000, 5000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 48, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 100, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 48, 
"broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker1", - res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker1")).findFirst().get() - .getUnload().serviceUnit(), Optional.of("broker2")), + new Unload("broker1:8080", + res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker1:8080")).findFirst().get() + .getUnload().serviceUnit(), Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", - res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker2")).findFirst().get() - .getUnload().serviceUnit(), Optional.of("broker1")), + new Unload("broker2:8080", + res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker2:8080")).findFirst().get() + .getUnload().serviceUnit(), Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 0.74); assertEquals(counter.getLoadStd(), 0.26); @@ -1078,18 +1112,18 @@ public void testMinBrokerWithZeroTraffic() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - var load = getCpuLoad(ctx, 4, "broker2"); + var load = getCpuLoad(ctx, 4, "broker2:8080"); FieldUtils.writeDeclaredField(load,"msgThroughputEMA", 0, true); - brokerLoadDataStore.pushAsync("broker2", load); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + brokerLoadDataStore.pushAsync("broker2:8080", load); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Underloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.26400000000000007); @@ -1103,17 +1137,17 @@ public void testMinBrokerWithLowerLoadThanAvg() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - var load = getCpuLoad(ctx, 3 , "broker2"); - brokerLoadDataStore.pushAsync("broker2", load); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + var load = getCpuLoad(ctx, 3 , "broker2:8080"); + brokerLoadDataStore.pushAsync("broker2:8080", load); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - 
expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Underloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.262); @@ -1129,9 +1163,9 @@ public void testMaxNumberOfTransfersPerShedderCycle() { .setLoadBalancerMaxNumberOfBrokerSheddingPerCycle(10); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -1155,9 +1189,9 @@ public void testLoadBalancerSheddingConditionHitCountThreshold() { } var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -1170,13 +1204,13 @@ public void testRemainingTopBundles() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 3000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 3000000)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -1190,14 +1224,14 @@ public void testLoadMoreThan100() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 200, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 1000, "broker5")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 200, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 1000, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", 
bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 2.4240000000000004); @@ -1231,13 +1265,16 @@ public void testOverloadOutlier() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContextLoadSkewedOverload(100); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); - var expected = new HashSet(); - expected.add(new UnloadDecision( - new Unload("broker99", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", - Optional.of("broker52")), Success, Overloaded)); - assertEquals(res, expected); - assertEquals(counter.getLoadAvg(), 0.019900000000000008); - assertEquals(counter.getLoadStd(), 0.09850375627355534); + Assertions.assertThat(res).isIn( + Set.of(new UnloadDecision( + new Unload("broker99:8080", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", + Optional.of("broker52:8080")), Success, Overloaded)), + Set.of(new UnloadDecision( + new Unload("broker99:8080", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", + Optional.of("broker83:8080")), Success, Overloaded)) + ); + assertEquals(counter.getLoadAvg(), 0.019900000000000008, 0.00001); + assertEquals(counter.getLoadStd(), 0.09850375627355534, 0.00001); } @Test @@ -1248,11 +1285,11 @@ public void testUnderloadOutlier() { var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker98", "my-tenant/my-namespace98/0x00000000_0x0FFFFFFF", - Optional.of("broker99")), Success, Underloaded)); + new Unload("broker98:8080", "my-tenant/my-namespace98/0x00000000_0x0FFFFFFF", + Optional.of("broker99:8080")), Success, Underloaded)); assertEquals(res, expected); - assertEquals(counter.getLoadAvg(), 0.9704000000000005); - assertEquals(counter.getLoadStd(), 0.09652895938523735); + assertEquals(counter.getLoadAvg(), 0.9704000000000005, 0.00001); + assertEquals(counter.getLoadStd(), 0.09652895938523735, 0.00001); } @Test @@ -1268,13 +1305,13 @@ public void testRandomLoadStats() { double[] loads = new double[numBrokers]; final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); } stats.update(loadStore, availableBrokers, Map.of(), conf); var brokerLoadDataStore = ctx.brokerLoadDataStore(); for (int i = 0; i < loads.length; i++) { - loads[i] = loadStore.get("broker" + i).get().getWeightedMaxEMA(); + loads[i] = loadStore.get("broker" + i + ":8080").get().getWeightedMaxEMA(); } int i = 0; int j = loads.length - 1; @@ -1309,8 +1346,8 @@ public void testHighVarianceLoadStats() { var conf = ctx.brokerConfiguration(); final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); - loadStore.pushAsync("broker" + i, getCpuLoad(ctx, loads[i], "broker" + i)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); + loadStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, loads[i], "broker" + i + ":8080")); } stats.update(loadStore, availableBrokers, Map.of(), conf); @@ -1328,8 +1365,8 @@ public void testLowVarianceLoadStats() { var conf = 
ctx.brokerConfiguration(); final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); - loadStore.pushAsync("broker" + i, getCpuLoad(ctx, loads[i], "broker" + i)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); + loadStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, loads[i], "broker" + i + ":8080")); } stats.update(loadStore, availableBrokers, Map.of(), conf); assertEquals(stats.avg(), 3.9449999999999994); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java index 184c337a47c80..d25cba2bd1bdd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java @@ -20,7 +20,6 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNotNull; import static org.testng.AssertJUnit.assertTrue; import com.google.common.collect.Sets; @@ -28,6 +27,7 @@ import lombok.Cleanup; import lombok.Data; import lombok.NoArgsConstructor; +import org.apache.commons.lang.reflect.FieldUtils; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; @@ -74,7 +74,8 @@ public void testPushGetAndRemove() throws Exception { @Cleanup LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, MyClass.class); + LoadDataStoreFactory.create(pulsar, topic, MyClass.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); MyClass myClass1 = new MyClass("1", 1); loadDataStore.pushAsync("key1", myClass1).get(); @@ -107,7 +108,8 @@ public void testForEach() throws Exception { @Cleanup LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, Integer.class); + LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); Map map = new HashMap<>(); @@ -131,7 +133,8 @@ public void testForEach() throws Exception { public void testTableViewRestart() throws Exception { String topic = TopicDomain.persistent + "://" + NamespaceName.SYSTEM_NAMESPACE + "/" + UUID.randomUUID(); LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, Integer.class); + LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); loadDataStore.pushAsync("1", 1).get(); @@ -140,15 +143,26 @@ public void testTableViewRestart() throws Exception { loadDataStore.closeTableView(); loadDataStore.pushAsync("1", 2).get(); - Exception ex = null; - try { - loadDataStore.get("1"); - } catch (IllegalStateException e) { - ex = e; - } - assertNotNull(ex); - loadDataStore.startTableView(); Awaitility.await().untilAsserted(() -> assertEquals(loadDataStore.get("1").get(), 2)); + + loadDataStore.pushAsync("1", 3).get(); + FieldUtils.writeField(loadDataStore, "tableViewLastUpdateTimestamp", 0 , true); + Awaitility.await().untilAsserted(() -> assertEquals(loadDataStore.get("1").get(), 3)); + } + + @Test + public void testProducerStop() throws Exception { + String topic = TopicDomain.persistent + "://" + 
NamespaceName.SYSTEM_NAMESPACE + "/" + UUID.randomUUID(); + LoadDataStore loadDataStore = + LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); + loadDataStore.pushAsync("1", 1).get(); + loadDataStore.removeAsync("1").get(); + + loadDataStore.close(); + + loadDataStore.pushAsync("2", 2).get(); + loadDataStore.removeAsync("2").get(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java index 0eea1d87513bf..b1e09bf2f3afb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java @@ -252,10 +252,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; doReturn(conf).when(ctx).brokerConfiguration(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilterTest.java index 856bbac029226..56332111f935b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BrokerLoadManagerClassFilterTest.java @@ -46,8 +46,12 @@ public void test() throws BrokerFilterException { LocalBrokerData localBrokerData1 = new LocalBrokerData(); localBrokerData1.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); + + LocalBrokerData localBrokerData2 = new LocalBrokerData(); + localBrokerData2.setLoadManagerClassName(null); loadData.getBrokerData().put("broker1", new BrokerData(localBrokerData)); loadData.getBrokerData().put("broker2", new BrokerData(localBrokerData1)); + loadData.getBrokerData().put("broker3", new BrokerData(localBrokerData2)); ServiceConfiguration conf = new ServiceConfiguration(); conf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); @@ -55,6 +59,7 @@ public void test() throws BrokerFilterException { Set brokers = new HashSet<>(){{ add("broker1"); add("broker2"); + add("broker3"); }}; filter.filter(brokers, null, loadData, conf); @@ -64,6 +69,7 @@ public void test() throws BrokerFilterException { brokers = new HashSet<>(){{ add("broker1"); add("broker2"); + add("broker3"); }}; conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); filter.filter(brokers, null, loadData, conf); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImplTest.java index 39298dce0b16a..563f707c445b2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImplTest.java @@ -18,15 +18,19 @@ */ package 
org.apache.pulsar.broker.loadbalance.impl; -import lombok.Cleanup; -import org.testng.Assert; -import org.testng.annotations.Test; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.loadbalance.LinuxInfoUtils; +import org.testng.Assert; +import org.testng.annotations.Test; +@Slf4j public class LinuxBrokerHostUsageImplTest { @Test @@ -42,4 +46,31 @@ public void checkOverrideBrokerNicSpeedGbps() { double totalLimit = linuxBrokerHostUsage.getTotalNicLimitWithConfiguration(nics); Assert.assertEquals(totalLimit, 3.0 * 1000 * 1000 * 3); } + + @Test + public void testCpuUsage() throws InterruptedException { + if (!LinuxInfoUtils.isLinux()) { + return; + } + + @Cleanup("shutdown") + ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(); + LinuxBrokerHostUsageImpl linuxBrokerHostUsage = + new LinuxBrokerHostUsageImpl(Integer.MAX_VALUE, Optional.empty(), executorService); + + linuxBrokerHostUsage.calculateBrokerHostUsage(); + TimeUnit.SECONDS.sleep(1); + linuxBrokerHostUsage.calculateBrokerHostUsage(); + + double usage = linuxBrokerHostUsage.getBrokerHostUsage().getCpu().usage; + double limit = linuxBrokerHostUsage.getBrokerHostUsage().getCpu().limit; + float percentUsage = linuxBrokerHostUsage.getBrokerHostUsage().getCpu().percentUsage(); + + Assert.assertTrue(usage > 0); + Assert.assertTrue(limit > 0); + Assert.assertTrue(limit >= usage); + Assert.assertTrue(percentUsage > 0); + + log.info("usage: {}, limit: {}, percentUsage: {}", usage, limit, percentUsage); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/ModularLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java similarity index 75% rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/ModularLoadManagerImplTest.java rename to pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java index 4b9f679f19d1f..8609d5889af67 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/ModularLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java @@ -16,10 +16,11 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.pulsar.broker.loadbalance; +package org.apache.pulsar.broker.loadbalance.impl; import static java.lang.Thread.sleep; import static org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl.TIME_AVERAGE_BROKER_ZPATH; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -28,6 +29,7 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.BoundType; import com.google.common.collect.Range; @@ -43,7 +45,9 @@ import java.util.Optional; import java.util.Random; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -55,33 +59,39 @@ import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; +import org.apache.pulsar.broker.loadbalance.LoadBalancerTestingUtils; +import org.apache.pulsar.broker.loadbalance.LoadData; +import org.apache.pulsar.broker.loadbalance.LoadManager; +import org.apache.pulsar.broker.loadbalance.ResourceUnit; import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.BrokerTopicLoadingPredicate; -import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; -import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerWrapper; -import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceAllocationPolicies; import org.apache.pulsar.client.admin.Namespaces; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceBundleFactory; +import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; import org.apache.pulsar.common.naming.NamespaceBundles; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.NamespaceIsolationDataImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.common.util.PortManager; +import org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.Notification; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.CreateOption; +import org.apache.pulsar.policies.data.loadbalancer.BrokerData; +import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import 
org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; -import org.apache.pulsar.policies.data.loadbalancer.BrokerData; -import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageBrokerData; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageMessageData; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; @@ -107,8 +117,9 @@ public class ModularLoadManagerImplTest { private PulsarService pulsar3; - private String primaryHost; - private String secondaryHost; + private String primaryBrokerId; + + private String secondaryBrokerId; private NamespaceBundleFactory nsFactory; @@ -155,6 +166,7 @@ void setup() throws Exception { config1.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); config1.setClusterName("use"); config1.setWebServicePort(Optional.of(0)); + config1.setWebServicePortTls(Optional.of(0)); config1.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config1.setAdvertisedAddress("localhost"); @@ -162,11 +174,10 @@ void setup() throws Exception { config1.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config1.setBrokerServicePort(Optional.of(0)); config1.setBrokerServicePortTls(Optional.of(0)); - config1.setWebServicePortTls(Optional.of(0)); pulsar1 = new PulsarService(config1); pulsar1.start(); - primaryHost = String.format("%s:%d", "localhost", pulsar1.getListenPortHTTP().get()); + primaryBrokerId = pulsar1.getBrokerId(); url1 = new URL(pulsar1.getWebServiceAddress()); admin1 = PulsarAdmin.builder().serviceHttpUrl(url1.toString()).build(); @@ -176,13 +187,13 @@ void setup() throws Exception { config2.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); config2.setClusterName("use"); config2.setWebServicePort(Optional.of(0)); + config2.setWebServicePortTls(Optional.of(0)); config2.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config2.setAdvertisedAddress("localhost"); config2.setBrokerShutdownTimeoutMs(0L); config2.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config2.setBrokerServicePort(Optional.of(0)); config2.setBrokerServicePortTls(Optional.of(0)); - config2.setWebServicePortTls(Optional.of(0)); pulsar2 = new PulsarService(config2); pulsar2.start(); @@ -191,16 +202,16 @@ void setup() throws Exception { config.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); config.setClusterName("use"); config.setWebServicePort(Optional.of(0)); + config.setWebServicePortTls(Optional.of(0)); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config.setAdvertisedAddress("localhost"); config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config.setBrokerServicePort(Optional.of(0)); config.setBrokerServicePortTls(Optional.of(0)); - config.setWebServicePortTls(Optional.of(0)); pulsar3 = new PulsarService(config); - secondaryHost = String.format("%s:%d", "localhost", pulsar2.getListenPortHTTP().get()); + secondaryBrokerId = pulsar2.getBrokerId(); url2 = new URL(pulsar2.getWebServiceAddress()); admin2 = PulsarAdmin.builder().serviceHttpUrl(url2.toString()).build(); @@ -220,7 +231,7 @@ void shutdown() throws Exception { pulsar2.close(); pulsar1.close(); - + if (pulsar3.isRunning()) { pulsar3.close(); } @@ -251,7 +262,7 @@ public void testCandidateConsistency() throws Exception { for (int i = 0; i < 2; ++i) { final 
ServiceUnitId serviceUnit = makeBundle(Integer.toString(i)); final String broker = primaryLoadManager.selectBrokerForAssignment(serviceUnit).get(); - if (broker.equals(primaryHost)) { + if (broker.equals(primaryBrokerId)) { foundFirst = true; } else { foundSecond = true; @@ -267,12 +278,12 @@ public void testCandidateConsistency() throws Exception { LoadData loadData = (LoadData) getField(primaryLoadManager, "loadData"); // Make sure the second broker is not in the internal map. - Awaitility.await().untilAsserted(() -> assertFalse(loadData.getBrokerData().containsKey(secondaryHost))); + Awaitility.await().untilAsserted(() -> assertFalse(loadData.getBrokerData().containsKey(secondaryBrokerId))); // Try 5 more selections, ensure they all go to the first broker. for (int i = 2; i < 7; ++i) { final ServiceUnitId serviceUnit = makeBundle(Integer.toString(i)); - assertEquals(primaryLoadManager.selectBrokerForAssignment(serviceUnit), primaryHost); + assertEquals(primaryLoadManager.selectBrokerForAssignment(serviceUnit), primaryBrokerId); } } @@ -294,7 +305,7 @@ public void testEvenBundleDistribution() throws Exception { // one bundle. pulsar1.getLocalMetadataStore().getMetadataCache(BundleData.class).create(firstBundleDataPath, bundleData).join(); for (final NamespaceBundle bundle : bundles) { - if (primaryLoadManager.selectBrokerForAssignment(bundle).equals(primaryHost)) { + if (primaryLoadManager.selectBrokerForAssignment(bundle).equals(primaryBrokerId)) { ++numAssignedToPrimary; } else { ++numAssignedToSecondary; @@ -308,52 +319,52 @@ public void testEvenBundleDistribution() throws Exception { } - + @Test public void testBrokerAffinity() throws Exception { // Start broker 3 pulsar3.start(); - + final String tenant = "test"; final String cluster = "test"; String namespace = tenant + "/" + cluster + "/" + "test"; String topic = "persistent://" + namespace + "/my-topic1"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(namespace, 16); - + String topicLookup = admin1.lookups().lookupTopic(topic); String bundleRange = admin1.lookups().getBundleRange(topic); - + String brokerServiceUrl = pulsar1.getBrokerServiceUrl(); - String brokerUrl = pulsar1.getSafeWebServiceAddress(); + String brokerId = pulsar1.getBrokerId(); log.debug("initial broker service url - {}", topicLookup); Random rand=new Random(); - + if (topicLookup.equals(brokerServiceUrl)) { int x = rand.nextInt(2); if (x == 0) { - brokerUrl = pulsar2.getSafeWebServiceAddress(); + brokerId = pulsar2.getBrokerId(); brokerServiceUrl = pulsar2.getBrokerServiceUrl(); } else { - brokerUrl = pulsar3.getSafeWebServiceAddress(); + brokerId = pulsar3.getBrokerId(); brokerServiceUrl = pulsar3.getBrokerServiceUrl(); } } - brokerUrl = brokerUrl.replaceFirst("http[s]?://", ""); - log.debug("destination broker service url - {}, broker url - {}", brokerServiceUrl, brokerUrl); - String leaderServiceUrl = admin1.brokers().getLeaderBroker().getServiceUrl(); - log.debug("leader serviceUrl - {}, broker1 service url - {}", leaderServiceUrl, pulsar1.getSafeWebServiceAddress()); - //Make a call to broker which is not a leader - if (!leaderServiceUrl.equals(pulsar1.getSafeWebServiceAddress())) { - 
admin1.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerUrl); + log.debug("destination broker service url - {}, broker url - {}", brokerServiceUrl, brokerId); + String leaderBrokerId = admin1.brokers().getLeaderBroker().getBrokerId(); + log.debug("leader lookup address - {}, broker1 lookup address - {}", leaderBrokerId, + pulsar1.getBrokerId()); + // Make a call to broker which is not a leader + if (!leaderBrokerId.equals(pulsar1.getBrokerId())) { + admin1.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerId); } else { - admin2.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerUrl); + admin2.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerId); } - + sleep(2000); String topicLookupAfterUnload = admin1.lookups().lookupTopic(topic); log.debug("final broker service url - {}", topicLookupAfterUnload); @@ -410,39 +421,69 @@ public void testLoadShedding() throws Exception { doAnswer(invocation -> { bundleReference.set(invocation.getArguments()[0].toString() + '/' + invocation.getArguments()[1]); return null; - }).when(namespacesSpy1).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + }).when(namespacesSpy1).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); + + AtomicReference> selectedBrokerRef = new AtomicReference<>(); + ModularLoadManagerImpl primaryLoadManagerSpy = spy(primaryLoadManager); + doAnswer(invocation -> { + ServiceUnitId serviceUnitId = (ServiceUnitId) invocation.getArguments()[0]; + Optional broker = primaryLoadManager.selectBroker(serviceUnitId); + selectedBrokerRef.set(broker); + return broker; + }).when(primaryLoadManagerSpy).selectBroker(any()); + setField(pulsar1.getAdminClient(), "namespaces", namespacesSpy1); pulsar1.getConfiguration().setLoadBalancerEnabled(true); - final LoadData loadData = (LoadData) getField(primaryLoadManager, "loadData"); + final LoadData loadData = (LoadData) getField(primaryLoadManagerSpy, "loadData"); final Map brokerDataMap = loadData.getBrokerData(); - final BrokerData brokerDataSpy1 = spy(brokerDataMap.get(primaryHost)); + final BrokerData brokerDataSpy1 = spy(brokerDataMap.get(primaryBrokerId)); when(brokerDataSpy1.getLocalData()).thenReturn(localBrokerData); - brokerDataMap.put(primaryHost, brokerDataSpy1); + brokerDataMap.put(primaryBrokerId, brokerDataSpy1); // Need to update all the bundle data for the shredder to see the spy. - primaryLoadManager.handleDataNotification(new Notification(NotificationType.Created, LoadManager.LOADBALANCE_BROKERS_ROOT + "/broker:8080")); + primaryLoadManagerSpy.handleDataNotification(new Notification(NotificationType.Created, LoadManager.LOADBALANCE_BROKERS_ROOT + "/broker:8080")); sleep(100); localBrokerData.setCpu(new ResourceUsage(80, 100)); - primaryLoadManager.doLoadShedding(); + primaryLoadManagerSpy.doLoadShedding(); // 80% is below overload threshold: verify nothing is unloaded. - verify(namespacesSpy1, Mockito.times(0)).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + verify(namespacesSpy1, Mockito.times(0)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); localBrokerData.setCpu(new ResourceUsage(90, 100)); - primaryLoadManager.doLoadShedding(); + primaryLoadManagerSpy.doLoadShedding(); // Most expensive bundle will be unloaded. 
- verify(namespacesSpy1, Mockito.times(1)).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + verify(namespacesSpy1, Mockito.times(1)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); assertEquals(bundleReference.get(), mockBundleName(2)); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); - primaryLoadManager.doLoadShedding(); + primaryLoadManagerSpy.doLoadShedding(); // Now less expensive bundle will be unloaded (normally other bundle would move off and nothing would be // unloaded, but this is not the case due to the spy's behavior). - verify(namespacesSpy1, Mockito.times(2)).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + verify(namespacesSpy1, Mockito.times(2)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); assertEquals(bundleReference.get(), mockBundleName(1)); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); - primaryLoadManager.doLoadShedding(); + primaryLoadManagerSpy.doLoadShedding(); // Now both are in grace period: neither should be unloaded. - verify(namespacesSpy1, Mockito.times(2)).unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + verify(namespacesSpy1, Mockito.times(2)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); + + // Test bundle transfer to same broker + + loadData.getRecentlyUnloadedBundles().clear(); + primaryLoadManagerSpy.doLoadShedding(); + verify(namespacesSpy1, Mockito.times(3)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); + + loadData.getRecentlyUnloadedBundles().clear(); + primaryLoadManagerSpy.doLoadShedding(); + // The bundle shouldn't be unloaded because the broker is the same. 
+ verify(namespacesSpy1, Mockito.times(4)) + .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); } // Test that ModularLoadManagerImpl will determine that writing local data to ZooKeeper is necessary if certain @@ -559,10 +600,12 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws final String tenant = "my-property"; final String cluster = "use"; final String namespace = "my-ns"; - final String broker1Address = pulsar1.getAdvertisedAddress() + "0"; - final String broker2Address = pulsar2.getAdvertisedAddress() + "1"; - final String sharedBroker = "broker3"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + String broker1Host = pulsar1.getAdvertisedAddress() + "0"; + final String broker1Address = broker1Host + ":8080"; + String broker2Host = pulsar2.getAdvertisedAddress() + "1"; + final String broker2Address = broker2Host + ":8080"; + final String sharedBroker = "broker3:8080"; + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(tenant + "/" + cluster + "/" + namespace); @@ -570,8 +613,8 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws // set a new policy String newPolicyJsonTemplate = "{\"namespaces\":[\"%s/%s/%s.*\"],\"primary\":[\"%s\"]," + "\"secondary\":[\"%s\"],\"auto_failover_policy\":{\"policy_type\":\"min_available\",\"parameters\":{\"min_limit\":%s,\"usage_threshold\":80}}}"; - String newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Address, - broker2Address, 1); + String newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Host, + broker2Host, 1); String newPolicyName = "my-ns-isolation-policies"; ObjectMapper jsonMapper = ObjectMapperFactory.create(); NamespaceIsolationDataImpl nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), @@ -583,12 +626,12 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws ServiceUnitId serviceUnit = LoadBalancerTestingUtils.makeBundles(nsFactory, tenant, cluster, namespace, 1)[0]; BrokerTopicLoadingPredicate brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { + public boolean isEnablePersistentTopics(String brokerId) { return true; } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { + public boolean isEnableNonPersistentTopics(String brokerId) { return true; } }; @@ -620,8 +663,8 @@ public boolean isEnableNonPersistentTopics(String brokerUrl) { // (2) now we will have isolation policy : primary=broker1, secondary=broker2, minLimit=2 - newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Address, - broker2Address, 2); + newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Host, + broker2Host, 2); nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), NamespaceIsolationDataImpl.class); admin1.clusters().createNamespaceIsolationPolicy("use", newPolicyName, nsPolicyData); @@ -658,16 +701,18 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { final String tenant = "my-tenant"; final String namespace = "my-tenant/use/my-ns"; 
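Illustrative sketch, not part of the patch: the updated testLoadShedding above relies on stubbing the three-argument Namespaces#unloadNamespaceBundle overload so no real unload happens and the requested destination broker id ("host:port") can be inspected. A minimal, self-contained version of that Mockito pattern, with hypothetical class and field names, could look like this:

    import static org.mockito.ArgumentMatchers.anyString;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.spy;

    import java.util.concurrent.atomic.AtomicReference;
    import org.apache.pulsar.client.admin.Namespaces;

    class UnloadDestinationCaptureSketch {
        final AtomicReference<String> lastDestinationBrokerId = new AtomicReference<>();

        // Wraps an admin Namespaces client in a spy that records the destination broker id
        // passed to unloadNamespaceBundle(namespace, bundleRange, destinationBroker) and
        // skips the real admin call.
        Namespaces captureUnloads(Namespaces namespaces) throws Exception {
            Namespaces namespacesSpy = spy(namespaces);
            doAnswer(invocation -> {
                lastDestinationBrokerId.set(invocation.getArgument(2, String.class));
                return null;
            }).when(namespacesSpy).unloadNamespaceBundle(anyString(), anyString(), anyString());
            return namespacesSpy;
        }
    }

A test can then install such a spy on the admin client, trigger doLoadShedding(), and assert that the recorded destination matches the broker returned by selectBroker(), which is what the verify(..., Mockito.times(n)) calls in the hunk above check in aggregate.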
final String bundle = "0x00000000_0xffffffff"; - final String brokerAddress = pulsar1.getAdvertisedAddress(); - final String broker1Address = pulsar1.getAdvertisedAddress() + 1; + final String brokerHost = pulsar1.getAdvertisedAddress(); + final String brokerAddress = brokerHost + ":8080"; + final String broker1Host = pulsar1.getAdvertisedAddress() + "1"; + final String broker1Address = broker1Host + ":8080"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(namespace); @Cleanup - PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getSafeWebServiceAddress()).build(); + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getWebServiceAddress()).build(); Producer producer = pulsarClient.newProducer().topic("persistent://" + namespace + "/my-topic1") .create(); ModularLoadManagerImpl loadManager = (ModularLoadManagerImpl) ((ModularLoadManagerWrapper) pulsar1 @@ -676,12 +721,13 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { loadManager.updateAll(); // test1: no isolation policy - assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryHost)); + assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryBrokerId)); // test2: as isolation policy, there are not another broker to load the bundle. String newPolicyJsonTemplate = "{\"namespaces\":[\"%s.*\"],\"primary\":[\"%s\"]," + "\"secondary\":[\"%s\"],\"auto_failover_policy\":{\"policy_type\":\"min_available\",\"parameters\":{\"min_limit\":%s,\"usage_threshold\":80}}}"; - String newPolicyJson = String.format(newPolicyJsonTemplate, namespace, broker1Address,broker1Address, 1); + + String newPolicyJson = String.format(newPolicyJsonTemplate, namespace, broker1Host, broker1Host, 1); String newPolicyName = "my-ns-isolation-policies"; ObjectMapper jsonMapper = ObjectMapperFactory.create(); NamespaceIsolationDataImpl nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), @@ -690,11 +736,11 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { assertFalse(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, broker1Address)); // test3: as isolation policy, there are another can load the bundle. 
- String newPolicyJson1 = String.format(newPolicyJsonTemplate, namespace, brokerAddress,brokerAddress, 1); + String newPolicyJson1 = String.format(newPolicyJsonTemplate, namespace, brokerHost, brokerHost, 1); NamespaceIsolationDataImpl nsPolicyData1 = jsonMapper.readValue(newPolicyJson1.getBytes(), NamespaceIsolationDataImpl.class); admin1.clusters().updateNamespaceIsolationPolicy(cluster, newPolicyName, nsPolicyData1); - assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryHost)); + assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryBrokerId)); producer.close(); } @@ -711,7 +757,7 @@ public void testOwnBrokerZnodeByMultipleBroker() throws Exception { ServiceConfiguration config = new ServiceConfiguration(); config.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); config.setClusterName("use"); - config.setWebServicePort(Optional.of(0)); + config.setWebServicePort(Optional.of(PortManager.nextLockedFreePort())); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -719,10 +765,12 @@ public void testOwnBrokerZnodeByMultipleBroker() throws Exception { PulsarService pulsar = new PulsarService(config); // create znode using different zk-session final String brokerZnode = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + pulsar.getAdvertisedAddress() + ":" - + config.getWebServicePort(); - pulsar1.getLocalMetadataStore().put(brokerZnode, new byte[0], Optional.empty(), EnumSet.of(CreateOption.Ephemeral)).join(); + + config.getWebServicePort().get(); + pulsar1.getLocalMetadataStore() + .put(brokerZnode, new byte[0], Optional.empty(), EnumSet.of(CreateOption.Ephemeral)).join(); try { pulsar.start(); + fail("should have failed"); } catch (PulsarServerException e) { //Ok. } @@ -749,4 +797,119 @@ public void testRemoveDeadBrokerTimeAverageData() throws Exception { Awaitility.await().untilAsserted(() -> assertTrue(pulsar1.getLeaderElectionService().isLeader())); assertEquals(data.size(), 1); } + + + @Test + public void testRemoveNonExistBundleData() + throws PulsarAdminException, InterruptedException, + PulsarClientException, PulsarServerException, NoSuchFieldException, IllegalAccessException { + final String cluster = "use"; + final String tenant = "my-tenant"; + final String namespace = "remove-non-exist-bundle-data-ns"; + final String topicName = tenant + "/" + namespace + "/" + "topic"; + int bundleNumbers = 8; + + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); + admin1.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); + admin1.namespaces().createNamespace(tenant + "/" + namespace, bundleNumbers); + + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getBrokerServiceUrl()).build(); + + // create a lot of topic to fully distributed among bundles. 
+ for (int i = 0; i < 10; i++) { + String topicNameI = topicName + i; + admin1.topics().createPartitionedTopic(topicNameI, 20); + // trigger bundle assignment + + pulsarClient.newConsumer().topic(topicNameI) + .subscriptionName("my-subscriber-name2").subscribe(); + } + + ModularLoadManagerWrapper loadManagerWrapper = (ModularLoadManagerWrapper) pulsar1.getLoadManager().get(); + ModularLoadManagerImpl lm1 = (ModularLoadManagerImpl) loadManagerWrapper.getLoadManager(); + ModularLoadManagerWrapper loadManager2 = (ModularLoadManagerWrapper) pulsar2.getLoadManager().get(); + ModularLoadManagerImpl lm2 = (ModularLoadManagerImpl) loadManager2.getLoadManager(); + + Field executors = lm1.getClass().getDeclaredField("executors"); + executors.setAccessible(true); + ExecutorService executorService = (ExecutorService) executors.get(lm1); + + assertEquals(lm1.getAvailableBrokers().size(), 2); + + pulsar1.getBrokerService().updateRates(); + pulsar2.getBrokerService().updateRates(); + + lm1.writeBrokerDataOnZooKeeper(true); + lm2.writeBrokerDataOnZooKeeper(true); + + // wait for metadata store notification finish + CountDownLatch latch = new CountDownLatch(1); + executorService.submit(latch::countDown); + latch.await(); + + loadManagerWrapper.writeResourceQuotasToZooKeeper(); + + MetadataCache bundlesCache = pulsar1.getLocalMetadataStore().getMetadataCache(BundleData.class); + + // trigger bundle split + String topicToFindBundle = topicName + 0; + NamespaceBundle bundleWillBeSplit = pulsar1.getNamespaceService().getBundle(TopicName.get(topicToFindBundle)); + + final Optional leastLoaded = loadManagerWrapper.getLeastLoaded(bundleWillBeSplit); + assertFalse(leastLoaded.isEmpty()); + + String bundleDataPath = ModularLoadManagerImpl.BUNDLE_DATA_PATH + "/" + tenant + "/" + namespace; + CompletableFuture> children = bundlesCache.getChildren(bundleDataPath); + List bundles = children.join(); + assertTrue(bundles.contains(bundleWillBeSplit.getBundleRange())); + + // after updateAll no namespace bundle data is deleted from metadata store. + lm1.updateAll(); + + children = bundlesCache.getChildren(bundleDataPath); + bundles = children.join(); + assertFalse(bundles.isEmpty()); + assertEquals(bundleNumbers, bundles.size()); + + NamespaceName namespaceName = NamespaceName.get(tenant, namespace); + pulsar1.getAdminClient().namespaces().splitNamespaceBundle(tenant + "/" + namespace, + bundleWillBeSplit.getBundleRange(), + false, NamespaceBundleSplitAlgorithm.RANGE_EQUALLY_DIVIDE_NAME); + + NamespaceBundles allBundlesAfterSplit = + pulsar1.getNamespaceService().getNamespaceBundleFactory() + .getBundles(namespaceName); + + assertFalse(allBundlesAfterSplit.getBundles().contains(bundleWillBeSplit)); + + // the bundle data should be deleted + + pulsar1.getBrokerService().updateRates(); + pulsar2.getBrokerService().updateRates(); + + lm1.writeBrokerDataOnZooKeeper(true); + lm2.writeBrokerDataOnZooKeeper(true); + + latch = new CountDownLatch(1); + // wait for metadata store notification finish + CountDownLatch finalLatch = latch; + executorService.submit(finalLatch::countDown); + latch.await(); + + loadManagerWrapper.writeResourceQuotasToZooKeeper(); + + lm1.updateAll(); + + log.info("update all triggered."); + + // check bundle data should be deleted from metadata store. 
+ + CompletableFuture<List<String>> childrenAfterSplit = bundlesCache.getChildren(bundleDataPath); + List<String> bundlesAfterSplit = childrenAfterSplit.join(); + + assertFalse(bundlesAfterSplit.contains(bundleWillBeSplit.getBundleRange())); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java index 00182fffb8a31..4b4042cf31a72 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java @@ -26,6 +26,7 @@ import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; @Test(groups = "broker") public class UniformLoadShedderTest { @@ -119,4 +120,81 @@ public void testBrokerWithMultipleBundles() { assertFalse(bundlesToUnload.isEmpty()); } + @Test + public void testOverloadBrokerSelect() { + conf.setMaxUnloadBundleNumPerShedding(1); + conf.setMaxUnloadPercentage(0.5); + int numBrokers = 5; + int numBundles = 5; + LoadData loadData = new LoadData(); + + LocalBrokerData[] localBrokerDatas = new LocalBrokerData[]{ + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData()}; + + String[] brokerNames = new String[]{"broker0", "broker1", "broker2", "broker3", "broker4"}; + + double[] brokerMsgRates = new double[]{ + 50000, // broker0 + 60000, // broker1 + 70000, // broker2 + 10000, // broker3 + 20000};// broker4 + + double[] brokerMsgThroughputs = new double[]{ + 50 * 1024 * 1024, // broker0 + 60 * 1024 * 1024, // broker1 + 70 * 1024 * 1024, // broker2 + 80 * 1024 * 1024, // broker3 + 10 * 1024 * 1024};// broker4 + + + for (int brokerId = 0; brokerId < numBrokers; brokerId++) { + double msgRate = brokerMsgRates[brokerId] / numBundles; + double throughput = brokerMsgThroughputs[brokerId] / numBundles; + for (int i = 0; i < numBundles; ++i) { + String bundleName = "broker-" + brokerId + "-bundle-" + i; + localBrokerDatas[brokerId].getBundles().add(bundleName); + localBrokerDatas[brokerId].setMsgRateIn(brokerMsgRates[brokerId]); + localBrokerDatas[brokerId].setMsgThroughputIn(brokerMsgThroughputs[brokerId]); + BundleData bundle = new BundleData(); + + TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData(); + timeAverageMessageData.setMsgRateIn(msgRate); + timeAverageMessageData.setMsgThroughputIn(throughput); + bundle.setShortTermData(timeAverageMessageData); + loadData.getBundleData().put(bundleName, bundle); + } + loadData.getBrokerData().put(brokerNames[brokerId], new BrokerData(localBrokerDatas[brokerId])); + } + + // disable throughput based load shedding, enable rate based load shedding only + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(0); + + Multimap<String, String> bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf); + assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker2")); + + + // disable rate based load shedding, enable throughput based load shedding only + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(0); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2); + + bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker3")); + + // enable both rate and throughput based load shedding, but rate based load shedding has higher priority + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2); + + bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf); + assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker2")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java index ac5d92c880227..f3fabc910346d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java @@ -349,14 +349,14 @@ public void testUnloadNamespaceBundleWithStuckTopic() throws Exception { @Test public void testLoadReportDeserialize() throws Exception { - final String candidateBroker1 = "http://localhost:8000"; - final String candidateBroker2 = "http://localhost:3000"; - LoadReport lr = new LoadReport(null, null, candidateBroker1, null); - LocalBrokerData ld = new LocalBrokerData(null, null, candidateBroker2, null); - URI uri1 = new URI(candidateBroker1); - URI uri2 = new URI(candidateBroker2); - String path1 = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri1.getHost(), uri1.getPort()); - String path2 = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri2.getHost(), uri2.getPort()); + final String candidateBroker1 = "localhost:8000"; + String broker1Url = "pulsar://localhost:6650"; + final String candidateBroker2 = "localhost:3000"; + String broker2Url = "pulsar://localhost:6660"; + LoadReport lr = new LoadReport("http://" + candidateBroker1, null, broker1Url, null); + LocalBrokerData ld = new LocalBrokerData("http://" + candidateBroker2, null, broker2Url, null); + String path1 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker1); + String path2 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker2); pulsar.getLocalMetadataStore().put(path1, ObjectMapperFactory.getMapper().writer().writeValueAsBytes(lr), @@ -375,23 +375,23 @@ public void testLoadReportDeserialize() throws Exception { .getAndSet(new ModularLoadManagerWrapper(new ModularLoadManagerImpl())); oldLoadManager.stop(); LookupResult result2 = pulsar.getNamespaceService().createLookupResult(candidateBroker2, false, null).get(); - Assert.assertEquals(result1.getLookupData().getBrokerUrl(), candidateBroker1); - Assert.assertEquals(result2.getLookupData().getBrokerUrl(), candidateBroker2); + Assert.assertEquals(result1.getLookupData().getBrokerUrl(), broker1Url); + Assert.assertEquals(result2.getLookupData().getBrokerUrl(), broker2Url); System.out.println(result2); } @Test public void testCreateLookupResult() throws Exception { - final String candidateBroker = "pulsar://localhost:6650"; + final String candidateBroker = "localhost:8080"; + final String brokerUrl = "pulsar://localhost:6650"; final String listenerUrl = "pulsar://localhost:7000"; final String listenerUrlTls = "pulsar://localhost:8000"; final String listener = "listenerName"; Map advertisedListeners = new HashMap<>(); advertisedListeners.put(listener, AdvertisedListener.builder().brokerServiceUrl(new URI(listenerUrl)).brokerServiceUrlTls(new 
URI(listenerUrlTls)).build()); - LocalBrokerData ld = new LocalBrokerData(null, null, candidateBroker, null, advertisedListeners); - URI uri = new URI(candidateBroker); - String path = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri.getHost(), uri.getPort()); + LocalBrokerData ld = new LocalBrokerData("http://" + candidateBroker, null, brokerUrl, null, advertisedListeners); + String path = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker); pulsar.getLocalMetadataStore().put(path, ObjectMapperFactory.getMapper().writer().writeValueAsBytes(ld), @@ -401,7 +401,7 @@ public void testCreateLookupResult() throws Exception { LookupResult noListener = pulsar.getNamespaceService().createLookupResult(candidateBroker, false, null).get(); LookupResult withListener = pulsar.getNamespaceService().createLookupResult(candidateBroker, false, listener).get(); - Assert.assertEquals(noListener.getLookupData().getBrokerUrl(), candidateBroker); + Assert.assertEquals(noListener.getLookupData().getBrokerUrl(), brokerUrl); Assert.assertEquals(withListener.getLookupData().getBrokerUrl(), listenerUrl); Assert.assertEquals(withListener.getLookupData().getBrokerUrlTls(), listenerUrlTls); System.out.println(withListener); @@ -683,7 +683,7 @@ public void testSplitBundleWithHighestThroughput() throws Exception { @Test public void testHeartbeatNamespaceMatch() throws Exception { - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), conf); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), conf); NamespaceBundle namespaceBundle = pulsar.getNamespaceService().getNamespaceBundleFactory().getFullBundle(namespaceName); assertTrue(NamespaceService.isSystemServiceNamespace( NamespaceBundle.getBundleNamespace(namespaceBundle.toString()))); @@ -704,7 +704,7 @@ public void testModularLoadManagerRemoveInactiveBundleFromLoadData() throws Exce Field loadManagerField = NamespaceService.class.getDeclaredField("loadManager"); loadManagerField.setAccessible(true); doReturn(true).when(loadManager).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); Optional res = Optional.of(resourceUnit); doReturn(res).when(loadManager).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager)); @@ -834,10 +834,7 @@ private void waitResourceDataUpdateToZK(LoadManager loadManager) throws Exceptio public CompletableFuture registryBrokerDataChangeNotice() { CompletableFuture completableFuture = new CompletableFuture<>(); - String lookupServiceAddress = pulsar.getAdvertisedAddress() + ":" - + (conf.getWebServicePort().isPresent() ? 
conf.getWebServicePort().get() - : conf.getWebServicePortTls().get()); - String brokerDataPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerDataPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + pulsar.getBrokerId(); pulsar.getLocalMetadataStore().registerListener(notice -> { if (brokerDataPath.equals(notice.getPath())){ if (!completableFuture.isDone()) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java index 0dbfe1760879a..1526611874a62 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java @@ -22,8 +22,12 @@ import com.google.common.collect.Sets; +import lombok.Cleanup; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -34,6 +38,9 @@ public class NamespaceUnloadingTest extends BrokerTestBase { @BeforeMethod @Override protected void setup() throws Exception { + conf.setTopicLevelPoliciesEnabled(true); + conf.setForceDeleteNamespaceAllowed(true); + conf.setTopicLoadTimeoutSeconds(Integer.MAX_VALUE); super.baseSetup(); } @@ -68,4 +75,26 @@ public void testUnloadPartiallyLoadedNamespace() throws Exception { producer.close(); } + @Test + public void testUnloadWithTopicCreation() throws PulsarAdminException, PulsarClientException { + final String namespaceName = "prop/ns_unloading"; + final String topicName = "persistent://prop/ns_unloading/with_topic_creation"; + final int partitions = 5; + admin.namespaces().createNamespace(namespaceName, 1); + admin.topics().createPartitionedTopic(topicName, partitions); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topicName) + .create(); + + for (int i = 0; i < 100; i++) { + admin.namespaces().unloadNamespaceBundle(namespaceName, "0x00000000_0xffffffff"); + } + + for (int i = 0; i < partitions; i++) { + producer.send(i); + } + admin.namespaces().deleteNamespace(namespaceName, true); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java index e53d5c25bd2b5..22fa2c32b5655 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java @@ -76,7 +76,7 @@ public void testCreateTopicWithNotTopicNsOwnedBroker() { int verifiedBrokerNum = 0; for (PulsarService pulsarService : this.getPulsarServiceList()) { BrokerService bs = pulsarService.getBrokerService(); - if (bs.isTopicNsOwnedByBroker(TopicName.get(topicName))) { + if (bs.isTopicNsOwnedByBrokerAsync(TopicName.get(topicName)).join()) { continue; } verifiedBrokerNum ++; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java index 8dd4f53db8240..46e8989ac3df4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java @@ -80,10 +80,8 @@ protected void startBroker() throws Exception { conf.setBrokerShutdownTimeoutMs(0L); conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); conf.setBrokerServicePort(Optional.of(0)); - conf.setBrokerServicePortTls(Optional.of(0)); conf.setAdvertisedAddress("localhost"); conf.setWebServicePort(Optional.of(0)); - conf.setWebServicePortTls(Optional.of(0)); serviceConfigurationList.add(conf); PulsarTestContext.Builder testContextBuilder = diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java index fed827b1517e6..9efd2c109f355 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java @@ -21,23 +21,30 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; - +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; +import java.util.function.Function; +import lombok.Cleanup; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.util.RateLimiter; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - public class ResourceGroupRateLimiterTest extends BrokerTestBase { final String rgName = "testRG"; @@ -147,6 +154,76 @@ public void testResourceGroupPublishRateLimit() throws Exception { testRateLimit(); } + @Test + public void testWithConcurrentUpdate() throws Exception { + cleanup(); + setup(); + createResourceGroup(rgName, testAddRg); + admin.namespaces().setNamespaceResourceGroup(namespaceName, rgName); + + Awaitility.await().untilAsserted(() -> + assertNotNull(pulsar.getResourceGroupServiceManager() + .getNamespaceResourceGroup(NamespaceName.get(namespaceName)))); + + Awaitility.await().untilAsserted(() -> + assertNotNull(pulsar.getResourceGroupServiceManager() + .resourceGroupGet(rgName).getResourceGroupPublishLimiter())); + + ResourceGroupPublishLimiter resourceGroupPublishLimiter = Mockito.spy(pulsar.getResourceGroupServiceManager() + .resourceGroupGet(rgName).getResourceGroupPublishLimiter()); + + AtomicBoolean 
blocking = new AtomicBoolean(false); + BiFunction, Long, Boolean> blockFunc = (function, acquirePermit) -> { + blocking.set(true); + while (blocking.get()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + return function.apply(acquirePermit); + }; + + Mockito.doAnswer(invocation -> { + RateLimiter publishRateLimiterOnMessage = + (RateLimiter) FieldUtils.readDeclaredField(resourceGroupPublishLimiter, + "publishRateLimiterOnMessage", true); + RateLimiter publishRateLimiterOnByte = + (RateLimiter) FieldUtils.readDeclaredField(resourceGroupPublishLimiter, + "publishRateLimiterOnByte", true); + int numbers = invocation.getArgument(0); + long bytes = invocation.getArgument(1); + return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) + && (publishRateLimiterOnByte == null || blockFunc.apply(publishRateLimiterOnByte::tryAcquire, bytes)); + }).when(resourceGroupPublishLimiter).tryAcquire(Mockito.anyInt(), Mockito.anyLong()); + + ConcurrentHashMap resourceGroupsMap = + (ConcurrentHashMap) FieldUtils.readDeclaredField(pulsar.getResourceGroupServiceManager(), + "resourceGroupsMap", true); + FieldUtils.writeDeclaredField(resourceGroupsMap.get(rgName), "resourceGroupPublishLimiter", + resourceGroupPublishLimiter, true); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(namespaceName + "/test-topic") + .create(); + + CompletableFuture sendFuture = producer.sendAsync(new byte[MESSAGE_SIZE]); + + Awaitility.await().untilAsserted(() -> Assert.assertTrue(blocking.get())); + + testAddRg.setPublishRateInBytes(Long.valueOf(MESSAGE_SIZE) + 1); + admin.resourcegroups().updateResourceGroup(rgName, testAddRg); + blocking.set(false); + + sendFuture.join(); + + // Now detach the namespace + admin.namespaces().removeNamespaceResourceGroup(namespaceName); + deleteResourceGroup(rgName); + } + + private void prepareData() { testAddRg.setPublishRateInBytes(Long.valueOf(MESSAGE_SIZE)); testAddRg.setPublishRateInMsgs(1); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java new file mode 100644 index 0000000000000..f8034c37971cc --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import io.netty.channel.DefaultEventLoop; +import io.netty.util.internal.DefaultPriorityQueue; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ConnectionPool; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; +import org.testng.Assert; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class AbstractReplicatorTest { + + @Test + public void testRetryStartProducerStoppedByTopicRemove() throws Exception { + final String localCluster = "localCluster"; + final String remoteCluster = "remoteCluster"; + final String topicName = "remoteTopicName"; + final String replicatorPrefix = "pulsar.repl"; + final DefaultEventLoop eventLoopGroup = new DefaultEventLoop(); + // Mock services. + final ServiceConfiguration pulsarConfig = mock(ServiceConfiguration.class); + final PulsarService pulsar = mock(PulsarService.class); + final BrokerService broker = mock(BrokerService.class); + final Topic localTopic = mock(Topic.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + final PulsarClientImpl localClient = mock(PulsarClientImpl.class); + when(localClient.getCnxPool()).thenReturn(connectionPool); + final PulsarClientImpl remoteClient = mock(PulsarClientImpl.class); + when(remoteClient.getCnxPool()).thenReturn(connectionPool); + final ProducerBuilder producerBuilder = mock(ProducerBuilder.class); + final ConcurrentOpenHashMap>> topics = new ConcurrentOpenHashMap<>(); + when(broker.executor()).thenReturn(eventLoopGroup); + when(broker.getTopics()).thenReturn(topics); + when(remoteClient.newProducer(any(Schema.class))).thenReturn(producerBuilder); + when(broker.pulsar()).thenReturn(pulsar); + when(pulsar.getClient()).thenReturn(localClient); + when(pulsar.getConfiguration()).thenReturn(pulsarConfig); + when(pulsarConfig.getReplicationProducerQueueSize()).thenReturn(100); + when(localTopic.getName()).thenReturn(topicName); + when(producerBuilder.topic(any())).thenReturn(producerBuilder); + when(producerBuilder.messageRoutingMode(any())).thenReturn(producerBuilder); + when(producerBuilder.enableBatching(anyBoolean())).thenReturn(producerBuilder); + when(producerBuilder.sendTimeout(anyInt(), any())).thenReturn(producerBuilder); + when(producerBuilder.maxPendingMessages(anyInt())).thenReturn(producerBuilder); + when(producerBuilder.producerName(anyString())).thenReturn(producerBuilder); + // Mock create producer fail. 
+ when(producerBuilder.create()).thenThrow(new RuntimeException("mocked ex")); + when(producerBuilder.createAsync()) + .thenReturn(CompletableFuture.failedFuture(new RuntimeException("mocked ex"))); + // Make race condition: "retry start producer" and "close replicator". + final ReplicatorInTest replicator = new ReplicatorInTest(localCluster, localTopic, remoteCluster, topicName, + replicatorPrefix, broker, remoteClient); + replicator.startProducer(); + replicator.disconnect(); + + // Verify task will done. + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { + AtomicInteger taskCounter = new AtomicInteger(); + CountDownLatch checkTaskFinished = new CountDownLatch(1); + eventLoopGroup.execute(() -> { + synchronized (replicator) { + LinkedBlockingQueue taskQueue = WhiteboxImpl.getInternalState(eventLoopGroup, "taskQueue"); + DefaultPriorityQueue scheduledTaskQueue = + WhiteboxImpl.getInternalState(eventLoopGroup, "scheduledTaskQueue"); + taskCounter.set(taskQueue.size() + scheduledTaskQueue.size()); + checkTaskFinished.countDown(); + } + }); + checkTaskFinished.await(); + Assert.assertEquals(taskCounter.get(), 0); + }); + } + + private static class ReplicatorInTest extends AbstractReplicator { + + public ReplicatorInTest(String localCluster, Topic localTopic, String remoteCluster, String remoteTopicName, + String replicatorPrefix, BrokerService brokerService, + PulsarClientImpl replicationClient) throws PulsarServerException { + super(localCluster, localTopic, remoteCluster, remoteTopicName, replicatorPrefix, brokerService, + replicationClient); + } + + @Override + protected String getProducerName() { + return "pulsar.repl.producer"; + } + + @Override + protected void readEntries(Producer producer) { + + } + + @Override + protected Position getReplicatorReadPosition() { + return PositionImpl.EARLIEST; + } + + @Override + protected long getNumberOfEntriesInBacklog() { + return 0; + } + + @Override + protected void disableReplicatorRead() { + + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java index 554c663850fd9..19e40ebf9960f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java @@ -75,7 +75,7 @@ public void testAdvertisedAddress() throws Exception { Assert.assertEquals( pulsar.getAdvertisedAddress(), advertisedAddress ); Assert.assertEquals( pulsar.getBrokerServiceUrl(), String.format("pulsar://%s:%d", advertisedAddress, pulsar.getBrokerListenPort().get()) ); Assert.assertEquals( pulsar.getSafeWebServiceAddress(), String.format("http://%s:%d", advertisedAddress, pulsar.getListenPortHTTP().get()) ); - String brokerZkPath = String.format("/loadbalance/brokers/%s:%d", pulsar.getAdvertisedAddress(), pulsar.getListenPortHTTP().get()); + String brokerZkPath = String.format("/loadbalance/brokers/%s", pulsar.getBrokerId()); String bkBrokerData = new String(bkEnsemble.getZkClient().getData(brokerZkPath, false, new Stat()), StandardCharsets.UTF_8); JsonObject jsonBkBrokerData = new Gson().fromJson(bkBrokerData, JsonObject.class); Assert.assertEquals( jsonBkBrokerData.get("pulsarServiceUrl").getAsString(), pulsar.getBrokerServiceUrl() ); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java index f3463ee121d75..0ac5fdaef1599 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java @@ -127,8 +127,8 @@ void setup() throws Exception { config.setManagedLedgerMaxEntriesPerLedger(MAX_ENTRIES_PER_LEDGER); config.setManagedLedgerMinLedgerRolloverTimeMinutes(0); config.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); - config.setSystemTopicEnabled(false); - config.setTopicLevelPoliciesEnabled(false); + config.setSystemTopicEnabled(true); + config.setTopicLevelPoliciesEnabled(true); config.setForceDeleteNamespaceAllowed(true); pulsar = new PulsarService(config); @@ -1169,8 +1169,13 @@ public void testProducerException() throws Exception { assertTrue(gotException, "backlog exceeded exception did not occur"); } - @Test - public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { + @DataProvider(name = "dedupTestSet") + public static Object[][] dedupTestSet() { + return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE } }; + } + + @Test(dataProvider = "dedupTestSet") + public void testProducerExceptionAndThenUnblockSizeQuota(boolean dedupTestSet) throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/quotahold"), new HashMap<>()); admin.namespaces().setBacklogQuota("prop/quotahold", @@ -1186,9 +1191,12 @@ public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { boolean gotException = false; Consumer consumer = client.newConsumer().topic(topic1).subscriptionName(subName1).subscribe(); - byte[] content = new byte[1024]; Producer producer = createProducer(client, topic1); + + admin.topicPolicies().setDeduplicationStatus(topic1, dedupTestSet); + Thread.sleep((TIME_TO_CHECK_BACKLOG_QUOTA + 1) * 1000); + for (int i = 0; i < 10; i++) { producer.send(content); } @@ -1207,6 +1215,7 @@ public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { } assertTrue(gotException, "backlog exceeded exception did not occur"); + assertFalse(producer.isConnected()); // now remove backlog and ensure that producer is unblocked; TopicStats stats = getTopicStats(topic1); @@ -1223,14 +1232,33 @@ public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { Exception sendException = null; gotException = false; try { - for (int i = 0; i < 5; i++) { + for (int i = 0; i < 10; i++) { producer.send(content); + Message msg = consumer.receive(); + consumer.acknowledge(msg); } } catch (Exception e) { gotException = true; sendException = e; } + Thread.sleep((TIME_TO_CHECK_BACKLOG_QUOTA + 1) * 1000); assertFalse(gotException, "unable to publish due to " + sendException); + + gotException = false; + long lastDisconnectedTimestamp = producer.getLastDisconnectedTimestamp(); + try { + // try to send over backlog quota and make sure it passes + producer.send(content); + producer.send(content); + } catch (PulsarClientException ce) { + assertTrue(ce instanceof PulsarClientException.ProducerBlockedQuotaExceededException + || ce instanceof PulsarClientException.TimeoutException, ce.getMessage()); + gotException = true; + sendException = ce; + } + assertFalse(gotException, "unable to publish due to " + sendException); + assertEquals(lastDisconnectedTimestamp, producer.getLastDisconnectedTimestamp()); + } @Test diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java index 433f5e56d952d..8e902d5d1e700 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java @@ -18,8 +18,17 @@ */ package org.apache.pulsar.broker.service; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import com.carrotsearch.hppc.ObjectSet; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -27,19 +36,32 @@ import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.Entry; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.ConsumerBuilder; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +@Slf4j @Test(groups = "broker") public class BatchMessageWithBatchIndexLevelTest extends BatchMessageTest { @@ -280,4 +302,370 @@ public void testAckMessageWithNotOwnerConsumerUnAckMessageCount() throws Excepti Awaitility.await().until(() -> getPulsar().getBrokerService().getTopic(topicName, false) .get().get().getSubscription(subName).getConsumers().get(0).getUnackedMessages() == 0); } + + @Test + public void testNegativeAckAndLongAckDelayWillNotLeadRepeatConsume() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp_"); + final String subscriptionName = "s1"; + final int redeliveryDelaySeconds = 2; + + // Create producer and consumer. 
+ Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxMessages(1000) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + ConsumerImpl consumer = (ConsumerImpl) pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .negativeAckRedeliveryDelay(redeliveryDelaySeconds, TimeUnit.SECONDS) + .enableBatchIndexAcknowledgment(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .acknowledgmentGroupTime(1, TimeUnit.HOURS) + .subscribe(); + + // Send 10 messages in batch. + ArrayList messagesSent = new ArrayList<>(); + List> sendTasks = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + String msg = Integer.valueOf(i).toString(); + sendTasks.add(producer.sendAsync(Integer.valueOf(i).toString())); + messagesSent.add(msg); + } + producer.flush(); + FutureUtil.waitForAll(sendTasks).join(); + + // Receive messages. + ArrayList messagesReceived = new ArrayList<>(); + // NegativeAck "batchMessageIdIndex1" once. + boolean index1HasBeenNegativeAcked = false; + while (true) { + Message message = consumer.receive(2, TimeUnit.SECONDS); + if (message == null) { + break; + } + if (index1HasBeenNegativeAcked) { + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + continue; + } + if (((MessageIdAdv) message.getMessageId()).getBatchIndex() == 1) { + consumer.negativeAcknowledge(message); + index1HasBeenNegativeAcked = true; + continue; + } + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + } + + // Receive negative acked messages. + // Wait the message negative acknowledgment finished. + int tripleRedeliveryDelaySeconds = redeliveryDelaySeconds * 3; + while (true) { + Message message = consumer.receive(tripleRedeliveryDelaySeconds, TimeUnit.SECONDS); + if (message == null) { + break; + } + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + } + + log.info("messagesSent: {}, messagesReceived: {}", messagesSent, messagesReceived); + Assert.assertEquals(messagesReceived.size(), messagesSent.size()); + + // cleanup. 
+ producer.close(); + consumer.close(); + admin.topics().delete(topicName); + } + + @Test + public void testMixIndexAndNonIndexUnAckMessageCount() throws Exception { + final String topicName = "persistent://prop/ns-abc/testMixIndexAndNonIndexUnAckMessageCount-"; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Shared) + .acknowledgmentGroupTime(100, TimeUnit.MILLISECONDS) + .enableBatchIndexAcknowledgment(true) + .isAckReceiptEnabled(true) + .subscribe(); + + // send two batch messages: [(1), (2,3)] + producer.send("1".getBytes()); + producer.sendAsync("2".getBytes()); + producer.send("3".getBytes()); + + Message message1 = consumer.receive(); + Message message2 = consumer.receive(); + Message message3 = consumer.receive(); + consumer.acknowledgeAsync(message1); + consumer.acknowledge(message2); // send group ack: non-index ack for 1, index ack for 2 + consumer.acknowledge(message3); // index ack for 3 + + assertEquals(admin.topics().getStats(topicName).getSubscriptions() + .get("sub").getUnackedMessages(), 0); + } + + @Test + public void testUnAckMessagesWhenConcurrentDeliveryAndAck() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp"); + final String subName = "s1"; + final int receiverQueueSize = 500; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics().createSubscription(topicName, subName, MessageId.earliest); + ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .receiverQueueSize(receiverQueueSize) + .subscriptionName(subName) + .enableBatchIndexAcknowledgment(true) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true); + + // Send 100 messages. + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + CompletableFuture lastSent = null; + for (int i = 0; i < 100; i++) { + lastSent = producer.sendAsync(i + ""); + } + producer.flush(); + lastSent.join(); + + // When consumer1 is closed, may some messages are in the client memory(it they are being acked now). + Consumer consumer1 = consumerBuilder.consumerName("c1").subscribe(); + Message[] messagesInClientMemory = new Message[2]; + for (int i = 0; i < 2; i++) { + Message msg = consumer1.receive(2, TimeUnit.SECONDS); + assertNotNull(msg); + messagesInClientMemory[i] = msg; + } + ConsumerImpl consumer2 = (ConsumerImpl) consumerBuilder.consumerName("c2").subscribe(); + Awaitility.await().until(() -> consumer2.isConnected()); + + // The consumer2 will receive messages after consumer1 closed. + // Insert a delay mechanism to make the flow like below: + // 1. Close consumer1, then the 100 messages will be redelivered. + // 2. Read redeliver messages. No messages were acked at this time. + // 3. The in-flight ack of two messages is finished. + // 4. Send the messages to consumer2, consumer2 will get all the 100 messages. + CompletableFuture receiveMessageSignal2 = new CompletableFuture<>(); + org.apache.pulsar.broker.service.Consumer serviceConsumer2 = + makeConsumerReceiveMessagesDelay(topicName, subName, "c2", receiveMessageSignal2); + // step 1: close consumer. 
+ consumer1.close(); + // step 2: wait for read messages from replay queue. + Thread.sleep(2 * 1000); + // step 3: wait for the in-flight ack. + BitSetRecyclable bitSetRecyclable = createBitSetRecyclable(100); + long ledgerId = 0, entryId = 0; + for (Message message : messagesInClientMemory) { + BatchMessageIdImpl msgId = (BatchMessageIdImpl) message.getMessageId(); + bitSetRecyclable.clear(msgId.getBatchIndex()); + ledgerId = msgId.getLedgerId(); + entryId = msgId.getEntryId(); + } + getCursor(topicName, subName).delete(PositionImpl.get(ledgerId, entryId, bitSetRecyclable.toLongArray())); + // step 4: send messages to consumer2. + receiveMessageSignal2.complete(null); + // Verify: Consumer2 will get all the 100 messages, and "unAckMessages" is 100. + List messages2 = new ArrayList<>(); + while (true) { + Message msg = consumer2.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messages2.add(msg); + } + assertEquals(messages2.size(), 100); + assertEquals(serviceConsumer2.getUnackedMessages(), 100); + // After the messages were pop out, the permits in the client memory went to 100. + Awaitility.await().untilAsserted(() -> { + assertEquals(serviceConsumer2.getAvailablePermits() + consumer2.getAvailablePermits(), + receiverQueueSize); + }); + + // cleanup. + producer.close(); + consumer2.close(); + admin.topics().delete(topicName, false); + } + + private BitSetRecyclable createBitSetRecyclable(int batchSize) { + BitSetRecyclable bitSetRecyclable = new BitSetRecyclable(batchSize); + for (int i = 0; i < batchSize; i++) { + bitSetRecyclable.set(i); + } + return bitSetRecyclable; + } + + private ManagedCursorImpl getCursor(String topic, String sub) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + return (ManagedCursorImpl) dispatcher.getCursor(); + } + + /*** + * After {@param signal} complete, the consumer({@param consumerName}) start to receive messages. + */ + private org.apache.pulsar.broker.service.Consumer makeConsumerReceiveMessagesDelay(String topic, String sub, + String consumerName, + CompletableFuture signal) throws Exception { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + org.apache.pulsar.broker.service.Consumer serviceConsumer = null; + for (org.apache.pulsar.broker.service.Consumer c : dispatcher.getConsumers()){ + if (c.consumerName().equals(consumerName)) { + serviceConsumer = c; + break; + } + } + final org.apache.pulsar.broker.service.Consumer originalConsumer = serviceConsumer; + + // Insert a delay signal. 
+ org.apache.pulsar.broker.service.Consumer spyServiceConsumer = spy(originalConsumer); + doAnswer(invocation -> { + List entries = (List) invocation.getArguments()[0]; + EntryBatchSizes batchSizes = (EntryBatchSizes) invocation.getArguments()[1]; + EntryBatchIndexesAcks batchIndexesAcks = (EntryBatchIndexesAcks) invocation.getArguments()[2]; + int totalMessages = (int) invocation.getArguments()[3]; + long totalBytes = (long) invocation.getArguments()[4]; + long totalChunkedMessages = (long) invocation.getArguments()[5]; + RedeliveryTracker redeliveryTracker = (RedeliveryTracker) invocation.getArguments()[6]; + return signal.thenApply(__ -> originalConsumer.sendMessages(entries, batchSizes, batchIndexesAcks, totalMessages, totalBytes, + totalChunkedMessages, redeliveryTracker)).join(); + }).when(spyServiceConsumer) + .sendMessages(anyList(), any(), any(), anyInt(), anyLong(), anyLong(), any()); + doAnswer(invocation -> { + List entries = (List) invocation.getArguments()[0]; + EntryBatchSizes batchSizes = (EntryBatchSizes) invocation.getArguments()[1]; + EntryBatchIndexesAcks batchIndexesAcks = (EntryBatchIndexesAcks) invocation.getArguments()[2]; + int totalMessages = (int) invocation.getArguments()[3]; + long totalBytes = (long) invocation.getArguments()[4]; + long totalChunkedMessages = (long) invocation.getArguments()[5]; + RedeliveryTracker redeliveryTracker = (RedeliveryTracker) invocation.getArguments()[6]; + long epoch = (long) invocation.getArguments()[7]; + return signal.thenApply(__ -> originalConsumer.sendMessages(entries, batchSizes, batchIndexesAcks, totalMessages, totalBytes, + totalChunkedMessages, redeliveryTracker, epoch)).join(); + }).when(spyServiceConsumer) + .sendMessages(anyList(), any(), any(), anyInt(), anyLong(), anyLong(), any(), anyLong()); + + // Replace the consumer. + Field fConsumerList = AbstractDispatcherMultipleConsumers.class.getDeclaredField("consumerList"); + Field fConsumerSet = AbstractDispatcherMultipleConsumers.class.getDeclaredField("consumerSet"); + fConsumerList.setAccessible(true); + fConsumerSet.setAccessible(true); + List consumerList = + (List) fConsumerList.get(dispatcher); + ObjectSet consumerSet = + (ObjectSet) fConsumerSet.get(dispatcher); + + consumerList.remove(originalConsumer); + consumerSet.removeAll(originalConsumer); + consumerList.add(spyServiceConsumer); + consumerSet.add(spyServiceConsumer); + return originalConsumer; + } + + /*** + * 1. Send a batch message contains 100 single messages. + * 2. Ack 2 messages. + * 3. Redeliver the batch message and ack them. + * 4. Verify: the permits is correct. + */ + @Test + public void testPermitsIfHalfAckBatchMessage() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp"); + final String subName = "s1"; + final int receiverQueueSize = 1000; + final int ackedMessagesCountInTheFistStep = 2; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics(). createSubscription(topicName, subName, MessageId.earliest); + ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .receiverQueueSize(receiverQueueSize) + .subscriptionName(subName) + .enableBatchIndexAcknowledgment(true) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true); + + // Send 100 messages. 
+ Producer<String> producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + CompletableFuture<MessageId> lastSent = null; + for (int i = 1; i <= 100; i++) { + lastSent = producer.sendAsync(i + ""); + } + producer.flush(); + lastSent.join(); + + // Ack 2 messages, and trigger a redelivery. + Consumer<String> consumer1 = consumerBuilder.subscribe(); + for (int i = 0; i < ackedMessagesCountInTheFistStep; i++) { + Message<String> msg = consumer1.receive(2, TimeUnit.SECONDS); + assertNotNull(msg); + consumer1.acknowledge(msg); + } + consumer1.close(); + + // Receive the remaining 98 messages, and ack them. + // Verify the permits are correct. + ConsumerImpl<String> consumer2 = (ConsumerImpl<String>) consumerBuilder.subscribe(); + Awaitility.await().until(() -> consumer2.isConnected()); + List<MessageId> messages = new ArrayList<>(); + int nextMessageValue = ackedMessagesCountInTheFistStep + 1; + while (true) { + Message<String> msg = consumer2.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + assertEquals(msg.getValue(), nextMessageValue + ""); + messages.add(msg.getMessageId()); + nextMessageValue++; + } + assertEquals(messages.size(), 98); + consumer2.acknowledge(messages); + + org.apache.pulsar.broker.service.Consumer serviceConsumer2 = + getTheUniqueServiceConsumer(topicName, subName); + Awaitility.await().untilAsserted(() -> { + // After the messages were popped out, the permits in the client memory went to 98. + int permitsInClientMemory = consumer2.getAvailablePermits(); + int permitsInBroker = serviceConsumer2.getAvailablePermits(); + assertEquals(permitsInClientMemory + permitsInBroker, receiverQueueSize); + }); + + // cleanup. + producer.close(); + consumer2.close(); + admin.topics().delete(topicName, false); + } + + private org.apache.pulsar.broker.service.Consumer getTheUniqueServiceConsumer(String topic, String sub) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + return dispatcher.getConsumers().iterator().next(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BkEnsemblesChaosTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BkEnsemblesChaosTest.java new file mode 100644 index 0000000000000..d49489d8a84b0 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BkEnsemblesChaosTest.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.apache.pulsar.broker.service; + +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.Producer; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class BkEnsemblesChaosTest extends CanReconnectZKClientPulsarServiceBaseTest { + + @Override + @BeforeClass(alwaysRun = true, timeOut = 300000) + public void setup() throws Exception { + super.setup(); + } + + @Override + @AfterClass(alwaysRun = true, timeOut = 300000) + public void cleanup() throws Exception { + super.cleanup(); + } + + @Test + public void testBookieInfoIsCorrectEvenIfLostNotificationDueToZKClientReconnect() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + final byte[] msgValue = "test".getBytes(); + admin.topics().createNonPartitionedTopic(topicName); + // Ensure broker works. + Producer producer1 = client.newProducer().topic(topicName).create(); + producer1.send(msgValue); + producer1.close(); + admin.topics().unload(topicName); + + // Restart some bookies, which triggers the ZK node of Bookie deleted and created. + // And make the local metadata store reconnect to lose some notification of the ZK node change. + for (int i = 0; i < numberOfBookies - 1; i++){ + bkEnsemble.stopBK(i); + } + makeLocalMetadataStoreKeepReconnect(); + for (int i = 0; i < numberOfBookies - 1; i++){ + bkEnsemble.startBK(i); + } + // Sleep 100ms to lose the notifications of ZK node create. + Thread.sleep(100); + stopLocalMetadataStoreAlwaysReconnect(); + + // Ensure broker still works. + admin.topics().unload(topicName); + Producer producer2 = client.newProducer().topic(topicName).create(); + producer2.send(msgValue); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java index e69714e539be6..40649a4164047 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java @@ -21,8 +21,8 @@ import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.retryStrategically; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.fail; - import java.lang.reflect.Field; import java.util.Map.Entry; import java.util.NavigableMap; @@ -31,10 +31,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; - import com.google.common.collect.Sets; import lombok.Cleanup; - import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -47,6 +45,7 @@ import org.apache.bookkeeper.util.StringUtils; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; @@ -127,7 +126,7 @@ public void testCrashBrokerWithoutCursorLedgerLeak() throws Exception { // (3) remove topic and managed-ledger from broker which means topic is not closed gracefully 
consumer.close(); producer.close(); - pulsar.getBrokerService().removeTopicFromCache(topic1); + pulsar.getBrokerService().removeTopicFromCache(topic); ManagedLedgerFactoryImpl factory = (ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory(); Field field = ManagedLedgerFactoryImpl.class.getDeclaredField("ledgers"); field.setAccessible(true); @@ -252,7 +251,7 @@ public void testSkipCorruptDataLedger() throws Exception { // clean managed-ledger and recreate topic to clean any data from the cache producer.close(); - pulsar.getBrokerService().removeTopicFromCache(topic1); + pulsar.getBrokerService().removeTopicFromCache(topic); ManagedLedgerFactoryImpl factory = (ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory(); Field field = ManagedLedgerFactoryImpl.class.getDeclaredField("ledgers"); field.setAccessible(true); @@ -497,10 +496,31 @@ public void testDeleteTopicWithMissingData() throws Exception { // Expected } - // Deletion must succeed - admin.topics().delete(topic); + assertThrows(PulsarAdminException.ServerSideErrorException.class, () -> admin.topics().delete(topic)); + } + + @Test + public void testDeleteTopicWithoutTopicLoaded() throws Exception { + String namespace = BrokerTestUtil.newUniqueName("prop/usc"); + admin.namespaces().createNamespace(namespace); + + String topic = BrokerTestUtil.newUniqueName(namespace + "/my-topic"); + + @Cleanup + PulsarClient client = PulsarClient.builder() + .serviceUrl(pulsar.getBrokerServiceUrl()) + .statsInterval(0, TimeUnit.SECONDS) + .build(); - // Topic will not be there after + @Cleanup + Producer producer = client.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.close(); + admin.topics().unload(topic); + + admin.topics().delete(topic); assertEquals(pulsar.getBrokerService().getTopicIfExists(topic).join(), Optional.empty()); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java index 36d4053ae497f..5252407892eea 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java @@ -156,6 +156,7 @@ public void testBookieIsolation() throws Exception { config.setBrokerServicePort(Optional.of(0)); config.setAdvertisedAddress("localhost"); config.setBookkeeperClientIsolationGroups(brokerBookkeeperClientIsolationGroups); + config.setDefaultNumberOfNamespaceBundles(8); config.setManagedLedgerDefaultEnsembleSize(2); config.setManagedLedgerDefaultWriteQuorum(2); @@ -207,6 +208,9 @@ public void testBookieIsolation() throws Exception { .bookkeeperAffinityGroupPrimary(tenantNamespaceIsolationGroups) .build()); + //Checks the namespace bundles after setting the bookie affinity + assertEquals(admin.namespaces().getBundles(ns2).getNumBundles(), config.getDefaultNumberOfNamespaceBundles()); + try { admin.namespaces().getBookieAffinityGroup(ns1); fail("ns1 should have no bookie affinity group set"); @@ -300,6 +304,7 @@ public void testSetRackInfoAndAffinityGroupDuringProduce() throws Exception { bookies[3].getBookieId()); ServiceConfiguration config = new ServiceConfiguration(); + config.setTopicLevelPoliciesEnabled(false); config.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); config.setClusterName(cluster); config.setWebServicePort(Optional.of(0)); @@ -608,9 +613,9 @@ public void testBookieIsolationWithSecondaryGroup() throws Exception { 
config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config.setBrokerServicePort(Optional.of(0)); + config.setTopicLevelPoliciesEnabled(false); config.setAdvertisedAddress("localhost"); config.setBookkeeperClientIsolationGroups(brokerBookkeeperClientIsolationGroups); - config.setManagedLedgerDefaultEnsembleSize(2); config.setManagedLedgerDefaultWriteQuorum(2); config.setManagedLedgerDefaultAckQuorum(2); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java index a28b60bbae354..0a6cffc7685d4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java @@ -25,18 +25,27 @@ import java.util.List; +import java.util.Optional; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; import org.apache.pulsar.client.admin.ListNamespaceTopicsOptions; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterClass; @@ -526,4 +535,58 @@ public void testDynamicConfigurationTopicAutoCreationPartitionedWhenDefaultMoreT } } + @Test + public void testExtensibleLoadManagerImplInternalTopicAutoCreations() + throws PulsarAdminException, PulsarClientException { + pulsar.getConfiguration().setAllowAutoTopicCreation(true); + pulsar.getConfiguration().setAllowAutoTopicCreationType(TopicType.PARTITIONED); + pulsar.getConfiguration().setDefaultNumPartitions(3); + pulsar.getConfiguration().setMaxNumPartitionsPerPartitionedTopic(5); + final String namespaceName = NamespaceName.SYSTEM_NAMESPACE.toString(); + TenantInfoImpl tenantInfo = new TenantInfoImpl(); + tenantInfo.setAllowedClusters(Set.of(configClusterName)); + admin.tenants().createTenant("pulsar", tenantInfo); + admin.namespaces().createNamespace(namespaceName); + admin.topics().createNonPartitionedTopic(ServiceUnitStateChannelImpl.TOPIC); + admin.topics().createNonPartitionedTopic(ExtensibleLoadManagerImpl.BROKER_LOAD_DATA_STORE_TOPIC); + admin.topics().createNonPartitionedTopic(ExtensibleLoadManagerImpl.TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); + + // clear the topics to test the auto creation of non-persistent topics. 
+ ConcurrentOpenHashMap>> topics = + pulsar.getBrokerService().getTopics(); + ConcurrentOpenHashMap>> oldTopics = new ConcurrentOpenHashMap<>(); + topics.forEach((key, val) -> oldTopics.put(key, val)); + topics.clear(); + + // The created persistent topic correctly can be found by + // pulsar.getPulsarResources().getTopicResources().persistentTopicExists(topic); + Producer producer = pulsarClient.newProducer().topic(ServiceUnitStateChannelImpl.TOPIC).create(); + + // The created non-persistent topics cannot be found, as we did topics.clear() + try { + pulsarClient.newProducer().topic(ExtensibleLoadManagerImpl.BROKER_LOAD_DATA_STORE_TOPIC).create(); + Assert.fail("Create should have failed."); + } catch (PulsarClientException.TopicDoesNotExistException e) { + // expected + } + try { + pulsarClient.newProducer().topic(ExtensibleLoadManagerImpl.TOP_BUNDLES_LOAD_DATA_STORE_TOPIC).create(); + Assert.fail("Create should have failed."); + } catch (PulsarClientException.TopicDoesNotExistException e) { + // expected + } + + oldTopics.forEach((key, val) -> topics.put(key, val)); + + Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> { + List partitionedTopicList = admin.topics().getPartitionedTopicList(namespaceName); + assertEquals(partitionedTopicList.size(), 0); + }); + + producer.close(); + admin.namespaces().deleteNamespace(namespaceName); + admin.tenants().deleteTenant("pulsar"); + + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceChaosTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceChaosTest.java new file mode 100644 index 0000000000000..4187364e46f65 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceChaosTest.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static org.testng.Assert.assertEquals; +import java.nio.charset.StandardCharsets; +import java.util.UUID; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; +import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.policies.data.impl.AutoTopicCreationOverrideImpl; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.reflect.WhiteboxImpl; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class BrokerServiceChaosTest extends CanReconnectZKClientPulsarServiceBaseTest { + + @Override + @BeforeClass(alwaysRun = true, timeOut = 300000) + public void setup() throws Exception { + super.setup(); + } + + @Override + @AfterClass(alwaysRun = true, timeOut = 300000) + public void cleanup() throws Exception { + super.cleanup(); + } + + @Test + public void testFetchPartitionedTopicMetadataWithCacheRefresh() throws Exception { + final String configMetadataStoreConnectString = + WhiteboxImpl.getInternalState(pulsar.getConfigurationMetadataStore(), "zkConnectString"); + final ZooKeeper anotherZKCli = new ZooKeeper(configMetadataStoreConnectString, 5000, null); + // Set the auto-topic-creation policy to PARTITIONED. + final String ns = defaultTenant + "/ns_" + UUID.randomUUID().toString().replaceAll("-", ""); + final TopicName topicName1 = TopicName.get("persistent://" + ns + "/tp1"); + final TopicName topicName2 = TopicName.get("persistent://" + ns + "/tp2"); + admin.namespaces().createNamespace(ns); + AutoTopicCreationOverride autoTopicCreationOverride = + new AutoTopicCreationOverrideImpl.AutoTopicCreationOverrideImplBuilder().allowAutoTopicCreation(true) + .topicType(TopicType.PARTITIONED.toString()) + .defaultNumPartitions(3).build(); + admin.namespaces().setAutoTopicCreationAsync(ns, autoTopicCreationOverride); + // Make sure the cache of the namespace policy is valid. + admin.namespaces().getAutoSubscriptionCreation(ns); + // Trigger creation of the zk node "/admin/partitioned-topics/{namespace}/persistent". + admin.topics().createPartitionedTopic(topicName1.toString(), 2); + admin.topics().deletePartitionedTopic(topicName1.toString()); + + // Since there is no partitioned metadata created, the partitions count of metadata will be 0. + PartitionedTopicMetadata partitionedTopicMetadata1 = + pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync(topicName2).get(); + assertEquals(partitionedTopicMetadata1.partitions, 0); + + // Create the partitioned metadata by another zk client. + // Make an error so that the cache cannot update. + makeLocalMetadataStoreKeepReconnect(); + anotherZKCli.create("/admin/partitioned-topics/" + ns + "/persistent/" + topicName2.getLocalName(), + "{\"partitions\":3}".getBytes(StandardCharsets.UTF_8), + ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + stopLocalMetadataStoreAlwaysReconnect(); + + // Get the partitioned metadata from the cache; there is a 90% chance that the partitions count of the metadata is 0.
+ PartitionedTopicMetadata partitionedTopicMetadata2 = + pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync(topicName2).get(); + // Note: If you want to reproduce the issue, you can perform validation on the next line. + // assertEquals(partitionedTopicMetadata2.partitions, 0); + + // Verify the new method will return a correct result. + PartitionedTopicMetadata partitionedTopicMetadata3 = + pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync(topicName2, true).get(); + assertEquals(partitionedTopicMetadata3.partitions, 3); + + // cleanup. + admin.topics().deletePartitionedTopic(topicName2.toString()); + anotherZKCli.close(); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java index 24e38438c5329..9789c9d725c0d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java @@ -65,6 +65,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.http.HttpResponse; @@ -72,8 +73,10 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.stats.prometheus.PrometheusRawMetricsProvider; import org.apache.pulsar.client.admin.BrokerStats; @@ -88,6 +91,7 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionMode; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.ConnectionPool; @@ -106,7 +110,9 @@ import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; @@ -116,11 +122,6 @@ @Test(groups = "broker") public class BrokerServiceTest extends BrokerTestBase { - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/certificate/server.crt"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/certificate/server.key"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/certificate/client.crt"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/certificate/client.key"; - @BeforeClass @Override protected void setup() throws Exception { @@ -675,8 +676,8 @@ public void testTlsEnabled() 
throws Exception { conf.setAuthenticationEnabled(false); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); restartBroker(); @@ -730,7 +731,7 @@ public void testTlsEnabled() throws Exception { // Case 4: Access with TLS (Use trusted certificates) try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrlTls.toString()).enableTls(true) - .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(TLS_SERVER_CERT_FILE_PATH) + .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(BROKER_CERT_FILE_PATH) .statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @@ -754,8 +755,8 @@ public void testTlsEnabledWithoutNonTlsServicePorts() throws Exception { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePort(Optional.empty()); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); restartBroker(); @@ -791,15 +792,15 @@ public void testTlsAuthAllowInsecure() throws Exception { conf.setAuthenticationProviders(providers); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(true); conf.setNumExecutorThreadPoolSize(5); restartBroker(); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); PulsarClient pulsarClient = null; @@ -854,15 +855,15 @@ public void testTlsAuthDisallowInsecure() throws Exception { conf.setAuthenticationProviders(providers); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(false); conf.setNumExecutorThreadPoolSize(5); restartBroker(); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); PulsarClient pulsarClient = null; @@ -916,16 +917,16 @@ public void testTlsAuthUseTrustCert() throws Exception { conf.setAuthenticationProviders(providers); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); 
conf.setTlsAllowInsecureConnection(false); - conf.setTlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); restartBroker(); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); PulsarClient pulsarClient = null; @@ -1146,7 +1147,7 @@ public void testTopicLoadingOnDisableNamespaceBundle() throws Exception { // try to create topic which should fail as bundle is disable CompletableFuture> futureResult = pulsar.getBrokerService() - .loadOrCreatePersistentTopic(topicName, true, null); + .loadOrCreatePersistentTopic(topicName, true, null, null); try { futureResult.get(); @@ -1159,6 +1160,69 @@ public void testTopicLoadingOnDisableNamespaceBundle() throws Exception { } } + @Test + public void testCheckInactiveSubscriptionsShouldNotDeleteCompactionCursor() throws Exception { + String namespace = "prop/test"; + + // set up broker set compaction threshold. + cleanup(); + conf.setBrokerServiceCompactionThresholdInBytes(8); + setup(); + + try { + admin.namespaces().createNamespace(namespace); + } catch (PulsarAdminException.ConflictException e) { + // Ok.. (if test fails intermittently and namespace is already created) + } + + // set enable subscription expiration. + admin.namespaces().setSubscriptionExpirationTime(namespace, 1); + + String compactionInactiveTestTopic = "persistent://prop/test/testCompactionCursorShouldNotDelete"; + + admin.topics().createNonPartitionedTopic(compactionInactiveTestTopic); + + CompletableFuture> topicCf = + pulsar.getBrokerService().getTopic(compactionInactiveTestTopic, true); + + Optional topicOptional = topicCf.get(); + assertTrue(topicOptional.isPresent()); + + PersistentTopic topic = (PersistentTopic) topicOptional.get(); + + PersistentSubscription sub = (PersistentSubscription) topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION); + assertNotNull(sub); + + topic.checkCompaction(); + + Field currentCompaction = PersistentTopic.class.getDeclaredField("currentCompaction"); + currentCompaction.setAccessible(true); + CompletableFuture compactionFuture = (CompletableFuture)currentCompaction.get(topic); + + compactionFuture.get(); + + ManagedCursorImpl cursor = (ManagedCursorImpl) sub.getCursor(); + + // make cursor last active time to very small to check if it will be deleted + Field cursorLastActiveField = ManagedCursorImpl.class.getDeclaredField("lastActive"); + cursorLastActiveField.setAccessible(true); + cursorLastActiveField.set(cursor, 0); + + // replace origin object. so we can check if subscription is deleted. + PersistentSubscription spySubscription = Mockito.spy(sub); + topic.getSubscriptions().put(Compactor.COMPACTION_SUBSCRIPTION, spySubscription); + + // trigger inactive check. + topic.checkInactiveSubscriptions(); + + // Compaction subscription should not call delete method. + Mockito.verify(spySubscription, Mockito.never()).delete(); + + // check if the subscription exist. + assertNotNull(topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION)); + + } + /** * Verifies brokerService should not have deadlock and successfully remove topic from topicMap on topic-failure and * it should not introduce deadlock while performing it. 
@@ -1469,8 +1533,10 @@ public void testIsSystemTopic() { assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_ASSIGN)); assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_LOG)); - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV1 = NamespaceService + .getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); assertTrue(brokerService.isSystemTopic("persistent://" + heartbeatNamespaceV1.toString() + "/healthcheck")); assertTrue(brokerService.isSystemTopic(heartbeatNamespaceV2.toString() + "/healthcheck")); } @@ -1520,4 +1586,64 @@ public void testDynamicConfigurationsForceDeleteTenantAllowed() throws Exception assertTrue(conf.isForceDeleteTenantAllowed()); }); } + + @Test + public void testIsSystemTopicAllowAutoTopicCreationAsync() throws Exception { + BrokerService brokerService = pulsar.getBrokerService(); + assertFalse(brokerService.isAllowAutoTopicCreationAsync( + ServiceUnitStateChannelImpl.TOPIC).get()); + assertTrue(brokerService.isAllowAutoTopicCreationAsync( + "persistent://pulsar/system/my-system-topic").get()); + } + + @Test + public void testDuplicateAcknowledgement() throws Exception { + final String ns = "prop/ns-test"; + + admin.namespaces().createNamespace(ns, 2); + final String topicName = "persistent://prop/ns-test/duplicated-acknowledgement-test"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .create(); + @Cleanup + Consumer consumer1 = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub-1") + .acknowledgmentGroupTime(0, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true) + .subscribe(); + producer.send("1".getBytes(StandardCharsets.UTF_8)); + Message message = consumer1.receive(); + consumer1.acknowledge(message); + consumer1.acknowledge(message); + assertEquals(admin.topics().getStats(topicName).getSubscriptions() + .get("sub-1").getUnackedMessages(), 0); + } + + @Test + public void testUnsubscribeNonDurableSub() throws Exception { + final String ns = "prop/ns-test"; + final String topic = ns + "/testUnsubscribeNonDurableSub"; + + admin.namespaces().createNamespace(ns, 2); + admin.topics().createPartitionedTopic(String.format("persistent://%s", topic), 1); + + pulsarClient.newProducer(Schema.STRING).topic(topic).create().close(); + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .topic(topic) + .subscriptionMode(SubscriptionMode.NonDurable) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName("sub1") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + try { + consumer.unsubscribe(); + } catch (Exception ex) { + fail("Unsubscribe failed"); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CanReconnectZKClientPulsarServiceBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CanReconnectZKClientPulsarServiceBaseTest.java new file mode 100644 index 0000000000000..bc6df685ffcd7 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CanReconnectZKClientPulsarServiceBaseTest.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import com.google.common.collect.Sets; +import io.netty.channel.Channel; +import java.net.URL; +import java.nio.channels.SelectionKey; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.pulsar.tests.TestRetrySupport; +import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.apache.pulsar.zookeeper.ZookeeperServerTest; +import org.apache.zookeeper.ClientCnxn; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.reflect.WhiteboxImpl; + +@Slf4j +public abstract class CanReconnectZKClientPulsarServiceBaseTest extends TestRetrySupport { + + protected final String defaultTenant = "public"; + protected final String defaultNamespace = defaultTenant + "/default"; + protected int numberOfBookies = 3; + protected final String clusterName = "r1"; + protected URL url; + protected URL urlTls; + protected ServiceConfiguration config = new ServiceConfiguration(); + protected ZookeeperServerTest brokerConfigZk; + protected LocalBookkeeperEnsemble bkEnsemble; + protected PulsarService pulsar; + protected BrokerService broker; + protected PulsarAdmin admin; + protected PulsarClient client; + protected ZooKeeper localZkOfBroker; + protected Object localMetaDataStoreClientCnx; + protected final AtomicBoolean LocalMetadataStoreInReconnectFinishSignal = new AtomicBoolean(); + protected void startZKAndBK() throws Exception { + // Start ZK. + brokerConfigZk = new ZookeeperServerTest(0); + brokerConfigZk.start(); + + // Start BK. + bkEnsemble = new LocalBookkeeperEnsemble(numberOfBookies, 0, () -> 0); + bkEnsemble.start(); + } + + protected void startBrokers() throws Exception { + // Start brokers. 
+ setConfigDefaults(config, clusterName, bkEnsemble, brokerConfigZk); + pulsar = new PulsarService(config); + pulsar.start(); + broker = pulsar.getBrokerService(); + ZKMetadataStore zkMetadataStore = (ZKMetadataStore) pulsar.getLocalMetadataStore(); + localZkOfBroker = zkMetadataStore.getZkClient(); + ClientCnxn cnxn = WhiteboxImpl.getInternalState(localZkOfBroker, "cnxn"); + Object sendThread = WhiteboxImpl.getInternalState(cnxn, "sendThread"); + localMetaDataStoreClientCnx = WhiteboxImpl.getInternalState(sendThread, "clientCnxnSocket"); + + url = new URL(pulsar.getWebServiceAddress()); + urlTls = new URL(pulsar.getWebServiceAddressTls()); + admin = PulsarAdmin.builder().serviceHttpUrl(url.toString()).build(); + client = PulsarClient.builder().serviceUrl(url.toString()).build(); + } + + protected void makeLocalMetadataStoreKeepReconnect() throws Exception { + if (!LocalMetadataStoreInReconnectFinishSignal.compareAndSet(false, true)) { + throw new RuntimeException("Local metadata store is already keeping reconnect"); + } + if (localMetaDataStoreClientCnx.getClass().getSimpleName().equals("ClientCnxnSocketNIO")) { + makeLocalMetadataStoreKeepReconnectNIO(); + } else { + // ClientCnxnSocketNetty. + makeLocalMetadataStoreKeepReconnectNetty(); + } + } + + protected void makeLocalMetadataStoreKeepReconnectNIO() { + new Thread(() -> { + while (LocalMetadataStoreInReconnectFinishSignal.get()) { + try { + SelectionKey sockKey = WhiteboxImpl.getInternalState(localMetaDataStoreClientCnx, "sockKey"); + if (sockKey != null) { + sockKey.channel().close(); + } + // Prevents high cpu usage. + Thread.sleep(5); + } catch (Exception e) { + log.error("Try close the ZK connection of local metadata store failed: {}", e.toString()); + } + } + }).start(); + } + + protected void makeLocalMetadataStoreKeepReconnectNetty() { + new Thread(() -> { + while (LocalMetadataStoreInReconnectFinishSignal.get()) { + try { + Channel channel = WhiteboxImpl.getInternalState(localMetaDataStoreClientCnx, "channel"); + if (channel != null) { + channel.close(); + } + // Prevents high cpu usage. 
+ Thread.sleep(5); + } catch (Exception e) { + log.error("Try close the ZK connection of local metadata store failed: {}", e.toString()); + } + } + }).start(); + } + + protected void stopLocalMetadataStoreAlwaysReconnect() { + LocalMetadataStoreInReconnectFinishSignal.set(false); + } + + protected void createDefaultTenantsAndClustersAndNamespace() throws Exception { + admin.clusters().createCluster(clusterName, ClusterData.builder() + .serviceUrl(url.toString()) + .serviceUrlTls(urlTls.toString()) + .brokerServiceUrl(pulsar.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(false) + .build()); + + admin.tenants().createTenant(defaultTenant, new TenantInfoImpl(Collections.emptySet(), + Sets.newHashSet(clusterName))); + + admin.namespaces().createNamespace(defaultNamespace, Sets.newHashSet(clusterName)); + } + + @Override + protected void setup() throws Exception { + incrementSetupNumber(); + + log.info("--- Starting OneWayReplicatorTestBase::setup ---"); + + startZKAndBK(); + + startBrokers(); + + createDefaultTenantsAndClustersAndNamespace(); + + Thread.sleep(100); + log.info("--- OneWayReplicatorTestBase::setup completed ---"); + } + + private void setConfigDefaults(ServiceConfiguration config, String clusterName, + LocalBookkeeperEnsemble bookkeeperEnsemble, ZookeeperServerTest brokerConfigZk) { + config.setClusterName(clusterName); + config.setAdvertisedAddress("localhost"); + config.setWebServicePort(Optional.of(0)); + config.setWebServicePortTls(Optional.of(0)); + config.setMetadataStoreUrl("zk:127.0.0.1:" + bookkeeperEnsemble.getZookeeperPort()); + config.setConfigurationMetadataStoreUrl("zk:127.0.0.1:" + brokerConfigZk.getZookeeperPort() + "/foo"); + config.setBrokerDeleteInactiveTopicsEnabled(false); + config.setBrokerDeleteInactiveTopicsFrequencySeconds(60); + config.setBrokerShutdownTimeoutMs(0L); + config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); + config.setBrokerServicePort(Optional.of(0)); + config.setBrokerServicePortTls(Optional.of(0)); + config.setBacklogQuotaCheckIntervalInSeconds(5); + config.setDefaultNumberOfNamespaceBundles(1); + config.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); + config.setEnableReplicatedSubscriptions(true); + config.setReplicatedSubscriptionsSnapshotFrequencyMillis(1000); + } + + @Override + protected void cleanup() throws Exception { + markCurrentSetupNumberCleaned(); + log.info("--- Shutting down ---"); + + stopLocalMetadataStoreAlwaysReconnect(); + + // Stop brokers. + client.close(); + admin.close(); + if (pulsar != null) { + pulsar.close(); + } + + // Stop ZK and BK. + bkEnsemble.stop(); + brokerConfigZk.stop(); + + // Reset configs. 
+ config = new ServiceConfiguration(); + setConfigDefaults(config, clusterName, bkEnsemble, brokerConfigZk); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java index df4f66c43d2b4..b36bf33aff441 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java @@ -25,11 +25,12 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; - +import com.google.common.collect.Sets; import java.lang.reflect.Method; import java.net.URL; +import java.util.Optional; import java.util.concurrent.TimeUnit; - +import lombok.Cleanup; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -50,11 +51,7 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import com.google.common.collect.Sets; - -import lombok.Cleanup; - -@Test(groups = "broker") +@Test(groups = "cluster-migration") public class ClusterMigrationTest { private static final Logger log = LoggerFactory.getLogger(ClusterMigrationTest.class); @@ -202,9 +199,13 @@ public void setup() throws Exception { protected void cleanup() throws Exception { log.info("--- Shutting down ---"); broker1.cleanup(); + admin1.close(); broker2.cleanup(); + admin2.close(); broker3.cleanup(); + admin3.close(); broker4.cleanup(); + admin4.close(); } @BeforeMethod(alwaysRun = true) @@ -399,7 +400,7 @@ public void testClusterMigrationWithReplicationBacklog(boolean persistent, Subsc assertEquals(topic1.getReplicators().size(), 1); // stop service in the replication cluster to build replication backlog - broker3.cleanup(); + broker3.stop(); retryStrategically((test) -> broker3.getPulsarService() == null, 10, 1000); assertNull(pulsar3.getBrokerService()); @@ -477,6 +478,14 @@ protected void setup() throws Exception { super.setupWithClusterName(clusterName); } + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setWebServicePortTls(Optional.of(0)); + this.conf.setBrokerServicePortTls(Optional.of(0)); + } + + public PulsarService getPulsarService() { return pulsar; } @@ -485,9 +494,13 @@ public String getClusterName() { return configClusterName; } + public void stop() throws Exception { + stopBroker(); + } + @Override protected void cleanup() throws Exception { - stopBroker(); + internalCleanup(); } public void restart() throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java index dbca31416bb9d..48311c57338b5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java @@ -21,18 +21,18 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - -import org.apache.pulsar.broker.service.BrokerServiceException.ConsumerAssignException; -import org.apache.pulsar.client.api.Range; -import org.testng.Assert; -import org.testng.annotations.Test; - import 
java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.pulsar.broker.service.BrokerServiceException.ConsumerAssignException; +import org.apache.pulsar.client.api.Range; +import org.testng.Assert; +import org.testng.annotations.Test; @Test(groups = "broker") public class ConsistentHashingStickyKeyConsumerSelectorTest { @@ -154,17 +154,17 @@ public void testGetConsumerKeyHashRanges() throws BrokerServiceException.Consume } Map> expectedResult = new HashMap<>(); expectedResult.put(consumers.get(0), Arrays.asList( - Range.of(0, 330121749), - Range.of(330121750, 618146114), - Range.of(1797637922, 1976098885))); + Range.of(119056335, 242013991), + Range.of(722195657, 1656011842), + Range.of(1707482098, 1914695766))); expectedResult.put(consumers.get(1), Arrays.asList( - Range.of(938427576, 1094135919), - Range.of(1138613629, 1342907082), - Range.of(1342907083, 1797637921))); + Range.of(0, 90164503), + Range.of(90164504, 119056334), + Range.of(382436668, 722195656))); expectedResult.put(consumers.get(2), Arrays.asList( - Range.of(618146115, 772640562), - Range.of(772640563, 938427575), - Range.of(1094135920, 1138613628))); + Range.of(242013992, 242377547), + Range.of(242377548, 382436667), + Range.of(1656011843, 1707482097))); for (Map.Entry> entry : selector.getConsumerKeyHashRanges().entrySet()) { System.out.println(entry.getValue()); Assert.assertEquals(entry.getValue(), expectedResult.get(entry.getKey())); @@ -172,4 +172,48 @@ public void testGetConsumerKeyHashRanges() throws BrokerServiceException.Consume } Assert.assertEquals(expectedResult.size(), 0); } + + // reproduces https://github.com/apache/pulsar/issues/22050 + @Test + public void shouldNotCollideWithConsumerNameEndsWithNumber() { + ConsistentHashingStickyKeyConsumerSelector selector = new ConsistentHashingStickyKeyConsumerSelector(12); + List consumerName = Arrays.asList("consumer1", "consumer11"); + List consumers = new ArrayList<>(); + for (String s : consumerName) { + Consumer consumer = mock(Consumer.class); + when(consumer.consumerName()).thenReturn(s); + selector.addConsumer(consumer); + consumers.add(consumer); + } + Map rangeToConsumer = new HashMap<>(); + for (Map.Entry> entry : selector.getConsumerKeyHashRanges().entrySet()) { + for (Range range : entry.getValue()) { + Consumer previous = rangeToConsumer.put(range, entry.getKey()); + if (previous != null) { + Assert.fail("Ranges are colliding between " + previous.consumerName() + " and " + entry.getKey() + .consumerName()); + } + } + } + } + + @Test + public void shouldRemoveConsumersFromConsumerKeyHashRanges() { + ConsistentHashingStickyKeyConsumerSelector selector = new ConsistentHashingStickyKeyConsumerSelector(12); + List consumers = IntStream.range(1, 100).mapToObj(i -> "consumer" + i) + .map(consumerName -> { + Consumer consumer = mock(Consumer.class); + when(consumer.consumerName()).thenReturn(consumerName); + return consumer; + }).collect(Collectors.toList()); + + // when consumers are added + consumers.forEach(selector::addConsumer); + // then each consumer should have a range + Assert.assertEquals(selector.getConsumerKeyHashRanges().size(), consumers.size()); + // when consumers are removed + consumers.forEach(selector::removeConsumer); + // then there should be no mapping remaining + Assert.assertEquals(selector.getConsumerKeyHashRanges().size(), 0); + } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java index 2f128fe6270a5..33e797fcb219f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java @@ -19,9 +19,18 @@ package org.apache.pulsar.broker.service; import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; -import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.ClientBuilder; +import org.apache.pulsar.client.api.InjectedClientCnxClientBuilder; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.policies.data.SubscriptionStats; @@ -32,10 +41,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.net.InetSocketAddress; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; - @Test(groups = "broker") public class EnableProxyProtocolTest extends BrokerTestBase { @@ -46,6 +51,15 @@ protected void setup() throws Exception { super.baseSetup(); } + protected PulsarClient newPulsarClient(String url, int intervalInSecs) throws PulsarClientException { + ClientBuilder clientBuilder = + PulsarClient.builder() + .serviceUrl(url) + .statsInterval(intervalInSecs, TimeUnit.SECONDS); + customizeNewPulsarClientBuilder(clientBuilder); + return createNewPulsarClient(clientBuilder); + } + @AfterClass(alwaysRun = true) @Override protected void cleanup() throws Exception { @@ -53,7 +67,7 @@ protected void cleanup() throws Exception { } @Test - public void testSimpleProduceAndConsume() throws PulsarClientException { + public void testSimpleProduceAndConsume() throws Exception { final String namespace = "prop/ns-abc"; final String topicName = "persistent://" + namespace + "/testSimpleProduceAndConsume"; final String subName = "my-subscriber-name"; @@ -76,30 +90,104 @@ public void testSimpleProduceAndConsume() throws PulsarClientException { } Assert.assertEquals(received, messages); + + // cleanup. 
+ org.apache.pulsar.broker.service.Consumer serverConsumer = pulsar.getBrokerService().getTopicReference(topicName) + .get().getSubscription(subName).getConsumers().get(0); + ((ServerCnx) serverConsumer.cnx()).close(); + consumer.close(); + producer.close(); + admin.topics().delete(topicName); } @Test - public void testProxyProtocol() throws PulsarClientException, ExecutionException, InterruptedException, PulsarAdminException { + public void testProxyProtocol() throws Exception { final String namespace = "prop/ns-abc"; final String topicName = "persistent://" + namespace + "/testProxyProtocol"; final String subName = "my-subscriber-name"; - PulsarClientImpl client = (PulsarClientImpl) pulsarClient; - CompletableFuture cnx = client.getCnxPool().getConnection(InetSocketAddress.createUnresolved("localhost", pulsar.getBrokerListenPort().get())); - // Simulate the proxy protcol message - cnx.get().ctx().channel().writeAndFlush(Unpooled.copiedBuffer("PROXY TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes())); - pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) - .subscribe(); - org.apache.pulsar.broker.service.Consumer c = pulsar.getBrokerService().getTopicReference(topicName).get().getSubscription(subName).getConsumers().get(0); - Awaitility.await().untilAsserted(() -> Assert.assertTrue(c.cnx().hasHAProxyMessage())); + + // Create a client that injected the protocol implementation. + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + PulsarClientImpl protocolClient = InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public void channelActive(ChannelHandlerContext ctx) throws Exception { + byte[] bs = "PROXY TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes(); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs)); + super.channelActive(ctx); + } + }); + + // Verify the addr can be handled correctly. + testPubAndSub(topicName, subName, "198.51.100.22:35646", protocolClient); + + // cleanup. + admin.topics().delete(topicName); + } + + @Test(timeOut = 10000) + public void testPubSubWhenSlowNetwork() throws Exception { + final String namespace = "prop/ns-abc"; + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + namespace + "/tp"); + final String subName = "my-subscriber-name"; + + // Create a client that injected the protocol implementation. + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + PulsarClientImpl protocolClient = InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public void channelActive(ChannelHandlerContext ctx) throws Exception { + Thread task = new Thread(() -> { + try { + byte[] bs1 = "PROXY".getBytes(); + byte[] bs2 = " TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes(); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs1)); + Thread.sleep(100); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs2)); + super.channelActive(ctx); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + task.start(); + } + }); + + // Verify the addr can be handled correctly. + testPubAndSub(topicName, subName, "198.51.100.22:35646", protocolClient); + + // cleanup. 
+ admin.topics().delete(topicName); + } + + private void testPubAndSub(String topicName, String subName, String expectedHostAndPort, + PulsarClientImpl pulsarClient) throws Exception { + // Verify: subscribe + org.apache.pulsar.client.api.Consumer clientConsumer = pulsarClient.newConsumer(Schema.STRING).topic(topicName) + .subscriptionName(subName).subscribe(); + org.apache.pulsar.broker.service.Consumer serverConsumer = pulsar.getBrokerService() + .getTopicReference(topicName).get().getSubscription(subName).getConsumers().get(0); + Awaitility.await().untilAsserted(() -> Assert.assertTrue(serverConsumer.cnx().hasHAProxyMessage())); TopicStats topicStats = admin.topics().getStats(topicName); Assert.assertEquals(topicStats.getSubscriptions().size(), 1); SubscriptionStats subscriptionStats = topicStats.getSubscriptions().get(subName); Assert.assertEquals(subscriptionStats.getConsumers().size(), 1); - Assert.assertEquals(subscriptionStats.getConsumers().get(0).getAddress(), "198.51.100.22:35646"); + Assert.assertEquals(subscriptionStats.getConsumers().get(0).getAddress(), expectedHostAndPort); + + // Verify: producer register. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + TopicStats topicStats2 = admin.topics().getStats(topicName); + Assert.assertEquals(topicStats2.getPublishers().size(), 1); + Assert.assertEquals(topicStats2.getPublishers().get(0).getAddress(), expectedHostAndPort); + + // Verify: Pub & Sub + producer.send("1"); + Message msg = clientConsumer.receive(2, TimeUnit.SECONDS); + Assert.assertNotNull(msg); + Assert.assertEquals(msg.getValue(), "1"); + clientConsumer.acknowledge(msg); - pulsarClient.newProducer().topic(topicName).create(); - topicStats = admin.topics().getStats(topicName); - Assert.assertEquals(topicStats.getPublishers().size(), 1); - Assert.assertEquals(topicStats.getPublishers().get(0).getAddress(), "198.51.100.22:35646"); + // cleanup. 
+ ((ServerCnx) serverConsumer.cnx()).close(); + producer.close(); + clientConsumer.close(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java index ebd53d65a7465..765a33968b98e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java @@ -35,8 +35,6 @@ import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.impl.ConsumerImpl; -import org.apache.pulsar.client.impl.MultiTopicsConsumerImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.naming.TopicVersion; @@ -356,12 +354,9 @@ public void testDeleteWhenNoBacklogs() throws Exception { admin.topics().skipAllMessages(topic, "sub"); Awaitility.await().untilAsserted(() -> { - Assert.assertFalse(consumer.isConnected()); - final List consumers = ((MultiTopicsConsumerImpl) consumer2).getConsumers(); - consumers.forEach(c -> Assert.assertFalse(c.isConnected())); - Assert.assertFalse(consumer2.isConnected()); - Assert.assertFalse(admin.topics().getList("prop/ns-abc").contains(topic)); - Assert.assertFalse(admin.topics().getList("prop/ns-abc").contains(topic2)); + final List topics = admin.topics().getList("prop/ns-abc"); + Assert.assertFalse(topics.contains(topic)); + Assert.assertFalse(topics.contains(topic2)); }); consumer.close(); consumer2.close(); @@ -602,10 +597,12 @@ public void testHealthTopicInactiveNotClean() throws Exception { conf.setBrokerDeleteInactiveTopicsFrequencySeconds(1); super.baseSetup(); // init topic - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV1 = NamespaceService + .getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfig()); final String healthCheckTopicV1 = "persistent://" + heartbeatNamespaceV1 + "/healthcheck"; - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); final String healthCheckTopicV2 = "persistent://" + heartbeatNamespaceV2 + "/healthcheck"; admin.brokers().healthcheck(TopicVersion.V1); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java new file mode 100644 index 0000000000000..e76fd823a4b16 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import java.lang.reflect.Field; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.service.persistent.PersistentReplicator; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.common.policies.data.TopicStats; +import org.junit.Assert; +import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class OneWayReplicatorTest extends OneWayReplicatorTestBase { + + @Override + @BeforeClass(alwaysRun = true, timeOut = 300000) + public void setup() throws Exception { + super.setup(); + } + + @Override + @AfterClass(alwaysRun = true, timeOut = 300000) + public void cleanup() throws Exception { + super.cleanup(); + } + + private void waitReplicatorStarted(String topicName) { + Awaitility.await().untilAsserted(() -> { + Optional topicOptional2 = pulsar2.getBrokerService().getTopic(topicName, false).get(); + assertTrue(topicOptional2.isPresent()); + PersistentTopic persistentTopic2 = (PersistentTopic) topicOptional2.get(); + assertFalse(persistentTopic2.getProducers().isEmpty()); + }); + } + + /** + * Override "AbstractReplicator.producer" by {@param producer} and return the original value. + */ + private ProducerImpl overrideProducerForReplicator(AbstractReplicator replicator, ProducerImpl newProducer) + throws Exception { + Field producerField = AbstractReplicator.class.getDeclaredField("producer"); + producerField.setAccessible(true); + ProducerImpl originalValue = (ProducerImpl) producerField.get(replicator); + synchronized (replicator) { + producerField.set(replicator, newProducer); + } + return originalValue; + } + + @Test + public void testReplicatorProducerStatInTopic() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + final String subscribeName = "subscribe_1"; + final byte[] msgValue = "test".getBytes(); + + admin1.topics().createNonPartitionedTopic(topicName); + admin2.topics().createNonPartitionedTopic(topicName); + admin1.topics().createSubscription(topicName, subscribeName, MessageId.earliest); + admin2.topics().createSubscription(topicName, subscribeName, MessageId.earliest); + + // Verify replicator works. 
+ Producer producer1 = client1.newProducer().topic(topicName).create(); + Consumer consumer2 = client2.newConsumer().topic(topicName).subscriptionName(subscribeName).subscribe(); + producer1.newMessage().value(msgValue).send(); + pulsar1.getBrokerService().checkReplicationPolicies(); + assertEquals(consumer2.receive(10, TimeUnit.SECONDS).getValue(), msgValue); + + // Verify there is at least one item in the attribute "publishers" or "replications" + TopicStats topicStats2 = admin2.topics().getStats(topicName); + Assert.assertTrue(topicStats2.getPublishers().size() + topicStats2.getReplication().size() > 0); + + // cleanup. + consumer2.close(); + producer1.close(); + cleanupTopics(() -> { + admin1.topics().delete(topicName); + admin2.topics().delete(topicName); + }); + } + + @Test + public void testCreateRemoteConsumerFirst() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + Producer producer1 = client1.newProducer(Schema.STRING).topic(topicName).create(); + + // The topic in cluster2 has a replicator-created producer (schema Auto_Produce), but does not have any schema. + // Verify: the consumer of cluster2 can be created successfully. + Consumer consumer2 = client2.newConsumer(Schema.STRING).topic(topicName).subscriptionName("s1") + .subscribe(); + // Wait for the replicator to start. + waitReplicatorStarted(topicName); + // cleanup. + producer1.close(); + consumer2.close(); + cleanupTopics(() -> { + admin1.topics().delete(topicName); + admin2.topics().delete(topicName); + }); + } + + @Test + public void testTopicCloseWhenInternalProducerCloseErrorOnce() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + admin1.topics().createNonPartitionedTopic(topicName); + // Wait for the replicator to start. + waitReplicatorStarted(topicName); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar1.getBrokerService().getTopic(topicName, false).join().get(); + PersistentReplicator replicator = + (PersistentReplicator) persistentTopic.getReplicators().values().iterator().next(); + // Mock an error when calling "replicator.disconnect()" + ProducerImpl mockProducer = Mockito.mock(ProducerImpl.class); + Mockito.when(mockProducer.closeAsync()).thenReturn(CompletableFuture.failedFuture(new Exception("mocked ex"))); + ProducerImpl originalProducer = overrideProducerForReplicator(replicator, mockProducer); + // Verify: since the "replicator.producer.closeAsync()" will retry after it fails, the topic unload should be + // successful. + admin1.topics().unload(topicName); + // Verify: after "replicator.producer.closeAsync()" retries again, the "replicator.producer" will be closed + // successfully. + overrideProducerForReplicator(replicator, originalProducer); + Awaitility.await().untilAsserted(() -> { + Assert.assertFalse(replicator.isConnected()); + }); + // cleanup. + cleanupTopics(() -> { + admin1.topics().delete(topicName); + admin2.topics().delete(topicName); + }); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTestBase.java new file mode 100644 index 0000000000000..33620716288af --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTestBase.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import com.google.common.collect.Sets; +import java.net.URL; +import java.util.Collections; +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.tests.TestRetrySupport; +import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.apache.pulsar.zookeeper.ZookeeperServerTest; + +@Slf4j +public abstract class OneWayReplicatorTestBase extends TestRetrySupport { + + protected final String defaultTenant = "public"; + protected final String defaultNamespace = defaultTenant + "/default"; + + protected final String cluster1 = "r1"; + protected URL url1; + protected URL urlTls1; + protected ServiceConfiguration config1 = new ServiceConfiguration(); + protected ZookeeperServerTest brokerConfigZk1; + protected LocalBookkeeperEnsemble bkEnsemble1; + protected PulsarService pulsar1; + protected BrokerService ns1; + protected PulsarAdmin admin1; + protected PulsarClient client1; + + protected URL url2; + protected URL urlTls2; + protected final String cluster2 = "r2"; + protected ServiceConfiguration config2 = new ServiceConfiguration(); + protected ZookeeperServerTest brokerConfigZk2; + protected LocalBookkeeperEnsemble bkEnsemble2; + protected PulsarService pulsar2; + protected BrokerService ns2; + protected PulsarAdmin admin2; + protected PulsarClient client2; + + protected void startZKAndBK() throws Exception { + // Start ZK. + brokerConfigZk1 = new ZookeeperServerTest(0); + brokerConfigZk1.start(); + brokerConfigZk2 = new ZookeeperServerTest(0); + brokerConfigZk2.start(); + + // Start BK. + bkEnsemble1 = new LocalBookkeeperEnsemble(3, 0, () -> 0); + bkEnsemble1.start(); + bkEnsemble2 = new LocalBookkeeperEnsemble(3, 0, () -> 0); + bkEnsemble2.start(); + } + + protected void startBrokers() throws Exception { + // Start brokers. 
+ setConfigDefaults(config1, cluster1, bkEnsemble1, brokerConfigZk1); + pulsar1 = new PulsarService(config1); + pulsar1.start(); + ns1 = pulsar1.getBrokerService(); + + url1 = new URL(pulsar1.getWebServiceAddress()); + urlTls1 = new URL(pulsar1.getWebServiceAddressTls()); + admin1 = PulsarAdmin.builder().serviceHttpUrl(url1.toString()).build(); + client1 = PulsarClient.builder().serviceUrl(url1.toString()).build(); + + // Start region 2 + setConfigDefaults(config2, cluster2, bkEnsemble2, brokerConfigZk2); + pulsar2 = new PulsarService(config2); + pulsar2.start(); + ns2 = pulsar2.getBrokerService(); + + url2 = new URL(pulsar2.getWebServiceAddress()); + urlTls2 = new URL(pulsar2.getWebServiceAddressTls()); + admin2 = PulsarAdmin.builder().serviceHttpUrl(url2.toString()).build(); + client2 = PulsarClient.builder().serviceUrl(url2.toString()).build(); + } + + protected void createDefaultTenantsAndClustersAndNamespace() throws Exception { + admin1.clusters().createCluster(cluster1, ClusterData.builder() + .serviceUrl(url1.toString()) + .serviceUrlTls(urlTls1.toString()) + .brokerServiceUrl(pulsar1.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar1.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(false) + .build()); + admin1.clusters().createCluster(cluster2, ClusterData.builder() + .serviceUrl(url2.toString()) + .serviceUrlTls(urlTls2.toString()) + .brokerServiceUrl(pulsar2.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar2.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(false) + .build()); + admin2.clusters().createCluster(cluster1, ClusterData.builder() + .serviceUrl(url1.toString()) + .serviceUrlTls(urlTls1.toString()) + .brokerServiceUrl(pulsar1.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar1.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(false) + .build()); + admin2.clusters().createCluster(cluster2, ClusterData.builder() + .serviceUrl(url2.toString()) + .serviceUrlTls(urlTls2.toString()) + .brokerServiceUrl(pulsar2.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar2.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(false) + .build()); + + admin1.tenants().createTenant(defaultTenant, new TenantInfoImpl(Collections.emptySet(), + Sets.newHashSet(cluster1, cluster2))); + admin2.tenants().createTenant(defaultTenant, new TenantInfoImpl(Collections.emptySet(), + Sets.newHashSet(cluster1, cluster2))); + + admin1.namespaces().createNamespace(defaultNamespace, Sets.newHashSet(cluster1, cluster2)); + admin2.namespaces().createNamespace(defaultNamespace); + } + + protected void cleanupTopics(CleanupTopicAction cleanupTopicAction) throws Exception { + admin1.namespaces().setNamespaceReplicationClusters(defaultNamespace, Collections.singleton(cluster1)); + admin1.namespaces().unload(defaultNamespace); + cleanupTopicAction.run(); + admin1.namespaces().setNamespaceReplicationClusters(defaultNamespace, Sets.newHashSet(cluster1, cluster2)); + } + + protected interface CleanupTopicAction { + void run() throws Exception; + } + + @Override + protected void setup() throws Exception { + incrementSetupNumber(); + + log.info("--- Starting OneWayReplicatorTestBase::setup ---"); + + startZKAndBK(); + + startBrokers(); + + createDefaultTenantsAndClustersAndNamespace(); + + Thread.sleep(100); + log.info("--- OneWayReplicatorTestBase::setup completed ---"); + } + + private void setConfigDefaults(ServiceConfiguration config, String clusterName, + LocalBookkeeperEnsemble bookkeeperEnsemble, ZookeeperServerTest brokerConfigZk) { + config.setClusterName(clusterName); + 
config.setAdvertisedAddress("localhost"); + config.setWebServicePort(Optional.of(0)); + config.setWebServicePortTls(Optional.of(0)); + config.setMetadataStoreUrl("zk:127.0.0.1:" + bookkeeperEnsemble.getZookeeperPort()); + config.setConfigurationMetadataStoreUrl("zk:127.0.0.1:" + brokerConfigZk.getZookeeperPort() + "/foo"); + config.setBrokerDeleteInactiveTopicsEnabled(false); + config.setBrokerDeleteInactiveTopicsFrequencySeconds(60); + config.setBrokerShutdownTimeoutMs(0L); + config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); + config.setBrokerServicePort(Optional.of(0)); + config.setBrokerServicePortTls(Optional.of(0)); + config.setBacklogQuotaCheckIntervalInSeconds(5); + config.setDefaultNumberOfNamespaceBundles(1); + config.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); + config.setEnableReplicatedSubscriptions(true); + config.setReplicatedSubscriptionsSnapshotFrequencyMillis(1000); + } + + @Override + protected void cleanup() throws Exception { + markCurrentSetupNumberCleaned(); + log.info("--- Shutting down ---"); + + // Stop brokers. + client1.close(); + client2.close(); + admin1.close(); + admin2.close(); + if (pulsar2 != null) { + pulsar2.close(); + } + if (pulsar1 != null) { + pulsar1.close(); + } + + // Stop ZK and BK. + bkEnsemble1.stop(); + bkEnsemble2.stop(); + brokerConfigZk1.stop(); + brokerConfigZk2.stop(); + + // Reset configs. + config1 = new ServiceConfiguration(); + setConfigDefaults(config1, cluster1, bkEnsemble1, brokerConfigZk1); + config2 = new ServiceConfiguration(); + setConfigDefaults(config2, cluster2, bkEnsemble2, brokerConfigZk2); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java index 48798f0020f01..e77fd07c6ef8b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java @@ -243,6 +243,39 @@ public void findEntryFailed(ManagedLedgerException exception, Optional factory.shutdown(); } + @Test + void testPersistentMessageFinderWhenLastMessageDelete() throws Exception { + final String ledgerAndCursorName = "testPersistentMessageFinderWhenLastMessageDelete"; + + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setRetentionSizeInMB(10); + config.setMaxEntriesPerLedger(10); + config.setRetentionTime(1, TimeUnit.HOURS); + ManagedLedger ledger = factory.open(ledgerAndCursorName, config); + ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor(ledgerAndCursorName); + + ledger.addEntry(createMessageWrittenToLedger("msg1")); + ledger.addEntry(createMessageWrittenToLedger("msg2")); + ledger.addEntry(createMessageWrittenToLedger("msg3")); + Position lastPosition = ledger.addEntry(createMessageWrittenToLedger("last-message")); + + long endTimestamp = System.currentTimeMillis() + 1000; + + Result result = new Result(); + // delete last position message + cursor.delete(lastPosition); + CompletableFuture future = findMessage(result, cursor, endTimestamp); + future.get(); + assertNull(result.exception); + assertNotEquals(result.position, null); + assertEquals(result.position, lastPosition); + + result.reset(); + cursor.close(); + ledger.close(); + factory.shutdown(); + } + @Test void testPersistentMessageFinderWithBrokerTimestampForMessage() throws Exception { diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java new file mode 100644 index 0000000000000..ab8d4dbe5cc01 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.ManagedLedger; +import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +@Test(groups = "broker") +@Slf4j +public class PersistentTopicInitializeDelayTest extends BrokerTestBase { + + @BeforeMethod + @Override + protected void setup() throws Exception { + conf.setTopicFactoryClassName(MyTopicFactory.class.getName()); + conf.setAllowAutoTopicCreation(true); + conf.setManagedLedgerMaxEntriesPerLedger(1); + conf.setBrokerDeleteInactiveTopicsEnabled(false); + conf.setTransactionCoordinatorEnabled(false); + conf.setTopicLoadTimeoutSeconds(30); + super.baseSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test(timeOut = 30 * 1000) + public void testTopicInitializeDelay() throws Exception { + admin.tenants().createTenant("public", TenantInfo.builder().allowedClusters(Set.of(configClusterName)).build()); + String namespace = "public/initialize-delay"; + admin.namespaces().createNamespace(namespace); + final String topicName = "persistent://" + namespace + "/testTopicInitializeDelay"; + admin.topics().createNonPartitionedTopic(topicName); + + admin.topicPolicies().setMaxConsumers(topicName, 10); + Awaitility.await().untilAsserted(() -> assertEquals(admin.topicPolicies().getMaxConsumers(topicName), 10)); + admin.topics().unload(topicName); + CompletableFuture> optionalFuture = pulsar.getBrokerService().getTopic(topicName, true); + + Optional topic = 
optionalFuture.get(15, TimeUnit.SECONDS); + assertTrue(topic.isPresent()); + } + + public static class MyTopicFactory implements TopicFactory { + @Override + public T create(String topic, ManagedLedger ledger, BrokerService brokerService, + Class topicClazz) { + try { + if (topicClazz == NonPersistentTopic.class) { + return (T) new NonPersistentTopic(topic, brokerService); + } else { + return (T) new MyPersistentTopic(topic, ledger, brokerService); + } + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + @Override + public void close() throws IOException { + // No-op + } + } + + public static class MyPersistentTopic extends PersistentTopic { + + private static AtomicInteger checkReplicationInvocationCount = new AtomicInteger(0); + + public MyPersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerService) { + super(topic, ledger, brokerService); + SystemTopicBasedTopicPoliciesService topicPoliciesService = + (SystemTopicBasedTopicPoliciesService) brokerService.getPulsar().getTopicPoliciesService(); + if (topicPoliciesService.getListeners().containsKey(TopicName.get(topic)) ) { + this.onUpdate(brokerService.getPulsar().getTopicPoliciesService().getTopicPoliciesIfExists(TopicName.get(topic))); + } + } + + protected void updateTopicPolicyByNamespacePolicy(Policies namespacePolicies) { + try { + Thread.sleep(10 * 1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + super.updateTopicPolicyByNamespacePolicy(namespacePolicies); + } + + public CompletableFuture checkReplication() { + if (TopicName.get(topic).getLocalName().equalsIgnoreCase("testTopicInitializeDelay")) { + checkReplicationInvocationCount.incrementAndGet(); + log.info("checkReplication, count = {}", checkReplicationInvocationCount.get()); + List configuredClusters = topicPolicies.getReplicationClusters().get(); + if (!(configuredClusters.size() == 1 && configuredClusters.contains(brokerService.pulsar().getConfiguration().getClusterName()))) { + try { + // this will cause the get topic timeout. 
+ Thread.sleep(8 * 1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new RuntimeException("checkReplication error"); + } + } + return super.checkReplication(); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 45ef58bb7038c..d8b81a51b3085 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -105,6 +105,7 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ProducerBuilderImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ProducerConfigurationData; @@ -131,6 +132,7 @@ import org.apache.pulsar.compaction.CompactedTopic; import org.apache.pulsar.compaction.CompactedTopicContext; import org.apache.pulsar.compaction.Compactor; +import org.apache.pulsar.compaction.CompactorMXBean; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; import org.awaitility.Awaitility; @@ -172,11 +174,13 @@ public void setup() throws Exception { svcConfig.setClusterName("pulsar-cluster"); svcConfig.setTopicLevelPoliciesEnabled(false); svcConfig.setSystemTopicEnabled(false); + Compactor compactor = mock(Compactor.class); + when(compactor.getStats()).thenReturn(mock(CompactorMXBean.class)); pulsarTestContext = PulsarTestContext.builderForNonStartableContext() .config(svcConfig) .spyByDefault() .useTestPulsarResources(metadataStore) - .compactor(mock(Compactor.class)) + .compactor(compactor) .build(); brokerService = pulsarTestContext.getBrokerService(); @@ -200,6 +204,7 @@ public void setup() throws Exception { doReturn(spy(DefaultEventLoop.class)).when(channel).eventLoop(); doReturn(channel).when(ctx).channel(); doReturn(ctx).when(serverCnx).ctx(); + doReturn(CompletableFuture.completedFuture(true)).when(serverCnx).checkConnectionLiveness(); NamespaceService nsSvc = mock(NamespaceService.class); NamespaceBundle bundle = mock(NamespaceBundle.class); @@ -678,7 +683,15 @@ public void testSubscribeUnsubscribe() throws Exception { f1.get(); // 2. duplicate subscribe - Future f2 = topic.subscribe(getSubscriptionOption(cmd)); + CommandSubscribe cmd2 = new CommandSubscribe() + .setConsumerId(2) + .setTopic(successTopicName) + .setSubscription(successSubName) + .setConsumerName("consumer-name") + .setReadCompacted(false) + .setRequestId(2) + .setSubType(SubType.Exclusive); + Future f2 = topic.subscribe(getSubscriptionOption(cmd2)); try { f2.get(); fail("should fail with exception"); @@ -743,19 +756,11 @@ public void testAddRemoveConsumer() throws Exception { sub.addConsumer(consumer); assertTrue(sub.getDispatcher().isConsumerConnected()); - // 2. duplicate add consumer - try { - sub.addConsumer(consumer).get(); - fail("Should fail with ConsumerBusyException"); - } catch (Exception e) { - assertTrue(e.getCause() instanceof BrokerServiceException.ConsumerBusyException); - } - - // 3. simple remove consumer + // 2. simple remove consumer sub.removeConsumer(consumer); assertFalse(sub.getDispatcher().isConsumerConnected()); - // 4. duplicate remove consumer + // 3. 
duplicate remove consumer try { sub.removeConsumer(consumer); fail("Should fail with ServerMetadataException"); @@ -1703,6 +1708,8 @@ public void testAtomicReplicationRemoval() throws Exception { ManagedCursor cursor = mock(ManagedCursorImpl.class); doReturn(remoteCluster).when(cursor).getName(); PulsarClientImpl pulsarClientMock = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClientMock.getCnxPool()).thenReturn(connectionPool); when(pulsarClientMock.newProducer(any())).thenAnswer( invocation -> { ProducerBuilderImpl producerBuilder = diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java index 07632814378d0..b13f150387bee 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java @@ -18,16 +18,25 @@ */ package org.apache.pulsar.broker.service; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; +import java.util.function.Function; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.common.policies.data.PublishRate; +import org.apache.pulsar.common.util.RateLimiter; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - @Test(groups = "broker") +@Slf4j public class PrecisTopicPublishRateThrottleTest extends BrokerTestBase{ @Override @@ -164,7 +173,7 @@ public void testBrokerLevelPublishRateDynamicUpdate() throws Exception{ "" + rateInMsg)); Topic topicRef = pulsar.getBrokerService().getTopicReference(topic).get(); Assert.assertNotNull(topicRef); - PrecisPublishLimiter limiter = ((PrecisPublishLimiter) ((AbstractTopic) topicRef).topicPublishRateLimiter); + PrecisePublishLimiter limiter = ((PrecisePublishLimiter) ((AbstractTopic) topicRef).topicPublishRateLimiter); Awaitility.await().untilAsserted(() -> Assert.assertEquals(limiter.publishMaxMessageRate, rateInMsg)); Assert.assertEquals(limiter.publishMaxByteRate, 0); @@ -180,4 +189,64 @@ public void testBrokerLevelPublishRateDynamicUpdate() throws Exception{ producer.close(); super.internalCleanup(); } + + @Test + public void testWithConcurrentUpdate() throws Exception { + PublishRate publishRate = new PublishRate(-1,10); + conf.setPreciseTopicPublishRateLimiterEnable(true); + conf.setMaxPendingPublishRequestsPerConnection(1000); + super.baseSetup(); + admin.namespaces().setPublishRate("prop/ns-abc", publishRate); + final String topic = "persistent://prop/ns-abc/testWithConcurrentUpdate"; + @Cleanup + org.apache.pulsar.client.api.Producer producer = pulsarClient.newProducer() + .topic(topic) + .producerName("producer-name") + .create(); + + AbstractTopic topicRef = (AbstractTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + Assert.assertNotNull(topicRef); + + PublishRateLimiter topicPublishRateLimiter = Mockito.spy(topicRef.getTopicPublishRateLimiter()); + + AtomicBoolean blocking = new 
AtomicBoolean(false); + BiFunction, Long, Boolean> blockFunc = (function, acquirePermit) -> { + blocking.set(true); + while (blocking.get()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + return function.apply(acquirePermit); + }; + + Mockito.doAnswer(invocation -> { + log.info("tryAcquire: {}, {}", invocation.getArgument(0), invocation.getArgument(1)); + RateLimiter publishRateLimiterOnMessage = + (RateLimiter) FieldUtils.readDeclaredField(topicPublishRateLimiter, + "topicPublishRateLimiterOnMessage", true); + RateLimiter publishRateLimiterOnByte = + (RateLimiter) FieldUtils.readDeclaredField(topicPublishRateLimiter, + "topicPublishRateLimiterOnByte", true); + int numbers = invocation.getArgument(0); + long bytes = invocation.getArgument(1); + return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) + && (publishRateLimiterOnByte == null || blockFunc.apply(publishRateLimiterOnByte::tryAcquire, bytes)); + }).when(topicPublishRateLimiter).tryAcquire(Mockito.anyInt(), Mockito.anyLong()); + + FieldUtils.writeField(topicRef, "topicPublishRateLimiter", topicPublishRateLimiter, true); + + CompletableFuture sendFuture = producer.sendAsync(new byte[10]); + + Awaitility.await().untilAsserted(() -> Assert.assertTrue(blocking.get())); + publishRate.publishThrottlingRateInByte = 20; + admin.namespaces().setPublishRate("prop/ns-abc", publishRate); + blocking.set(false); + + sendFuture.join(); + + super.internalCleanup(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisPublishLimiterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisePublishLimiterTest.java similarity index 57% rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisPublishLimiterTest.java rename to pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisePublishLimiterTest.java index a0d0df52d2417..73cb43d52b112 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisPublishLimiterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisePublishLimiterTest.java @@ -23,35 +23,35 @@ import org.apache.pulsar.common.policies.data.PublishRate; import org.testng.annotations.Test; -public class PrecisPublishLimiterTest { +public class PrecisePublishLimiterTest { @Test void shouldResetMsgLimitAfterUpdate() { - PrecisPublishLimiter precisPublishLimiter = new PrecisPublishLimiter(new PublishRate(), () -> { + PrecisePublishLimiter precisePublishLimiter = new PrecisePublishLimiter(new PublishRate(), () -> { }); - precisPublishLimiter.update(new PublishRate(1, 1)); - assertFalse(precisPublishLimiter.tryAcquire(99, 99)); - precisPublishLimiter.update(new PublishRate(-1, 100)); - assertTrue(precisPublishLimiter.tryAcquire(99, 99)); + precisePublishLimiter.update(new PublishRate(1, 1)); + assertFalse(precisePublishLimiter.tryAcquire(99, 99)); + precisePublishLimiter.update(new PublishRate(-1, 100)); + assertTrue(precisePublishLimiter.tryAcquire(99, 99)); } @Test void shouldResetBytesLimitAfterUpdate() { - PrecisPublishLimiter precisPublishLimiter = new PrecisPublishLimiter(new PublishRate(), () -> { + PrecisePublishLimiter precisePublishLimiter = new PrecisePublishLimiter(new PublishRate(), () -> { }); - precisPublishLimiter.update(new PublishRate(1, 1)); - assertFalse(precisPublishLimiter.tryAcquire(99, 99)); - precisPublishLimiter.update(new PublishRate(100, -1)); - 
assertTrue(precisPublishLimiter.tryAcquire(99, 99)); + precisePublishLimiter.update(new PublishRate(1, 1)); + assertFalse(precisePublishLimiter.tryAcquire(99, 99)); + precisePublishLimiter.update(new PublishRate(100, -1)); + assertTrue(precisePublishLimiter.tryAcquire(99, 99)); } @Test void shouldCloseResources() throws Exception { for (int i = 0; i < 20000; i++) { - PrecisPublishLimiter precisPublishLimiter = new PrecisPublishLimiter(new PublishRate(100, 100), () -> { + PrecisePublishLimiter precisePublishLimiter = new PrecisePublishLimiter(new PublishRate(100, 100), () -> { }); - precisPublishLimiter.tryAcquire(99, 99); - precisPublishLimiter.close(); + precisePublishLimiter.tryAcquire(99, 99); + precisePublishLimiter.close(); } } -} \ No newline at end of file +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PublishRateLimiterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PublishRateLimiterTest.java index 9d2bd49ba04bd..b934ced08c5db 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PublishRateLimiterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PublishRateLimiterTest.java @@ -39,7 +39,7 @@ public class PublishRateLimiterTest { private final PublishRate publishRate = new PublishRate(10, 100); private final PublishRate newPublishRate = new PublishRate(20, 200); - private PrecisPublishLimiter precisPublishLimiter; + private PrecisePublishLimiter precisePublishLimiter; private PublishRateLimiterImpl publishRateLimiter; @BeforeMethod @@ -47,7 +47,7 @@ public void setup() throws Exception { policies.publishMaxMessageRate = new HashMap<>(); policies.publishMaxMessageRate.put(CLUSTER_NAME, publishRate); - precisPublishLimiter = new PrecisPublishLimiter(policies, CLUSTER_NAME, () -> System.out.print("Refresh permit")); + precisePublishLimiter = new PrecisePublishLimiter(policies, CLUSTER_NAME, () -> System.out.print("Refresh permit")); publishRateLimiter = new PublishRateLimiterImpl(policies, CLUSTER_NAME); } @@ -88,23 +88,25 @@ public void testPublishRateLimiterImplUpdate() { @Test public void testPrecisePublishRateLimiterUpdate() { - assertFalse(precisPublishLimiter.tryAcquire(15, 150)); + assertFalse(precisePublishLimiter.tryAcquire(15, 150)); //update - precisPublishLimiter.update(newPublishRate); - assertTrue(precisPublishLimiter.tryAcquire(15, 150)); + precisePublishLimiter.update(newPublishRate); + assertTrue(precisePublishLimiter.tryAcquire(15, 150)); } @Test public void testPrecisePublishRateLimiterAcquire() throws Exception { - Class precisPublishLimiterClass = Class.forName("org.apache.pulsar.broker.service.PrecisPublishLimiter"); - Field topicPublishRateLimiterOnMessageField = precisPublishLimiterClass.getDeclaredField("topicPublishRateLimiterOnMessage"); - Field topicPublishRateLimiterOnByteField = precisPublishLimiterClass.getDeclaredField("topicPublishRateLimiterOnByte"); + Class precisePublishLimiterClass = Class.forName("org.apache.pulsar.broker.service.PrecisePublishLimiter"); + Field topicPublishRateLimiterOnMessageField = precisePublishLimiterClass.getDeclaredField("topicPublishRateLimiterOnMessage"); + Field topicPublishRateLimiterOnByteField = precisePublishLimiterClass.getDeclaredField("topicPublishRateLimiterOnByte"); topicPublishRateLimiterOnMessageField.setAccessible(true); topicPublishRateLimiterOnByteField.setAccessible(true); - RateLimiter topicPublishRateLimiterOnMessage = (RateLimiter)topicPublishRateLimiterOnMessageField.get(precisPublishLimiter); - 
RateLimiter topicPublishRateLimiterOnByte = (RateLimiter)topicPublishRateLimiterOnByteField.get(precisPublishLimiter); + RateLimiter topicPublishRateLimiterOnMessage = (RateLimiter)topicPublishRateLimiterOnMessageField.get( + precisePublishLimiter); + RateLimiter topicPublishRateLimiterOnByte = (RateLimiter)topicPublishRateLimiterOnByteField.get( + precisePublishLimiter); Method renewTopicPublishRateLimiterOnMessageMethod = topicPublishRateLimiterOnMessage.getClass().getDeclaredMethod("renew", null); Method renewTopicPublishRateLimiterOnByteMethod = topicPublishRateLimiterOnByte.getClass().getDeclaredMethod("renew", null); @@ -112,7 +114,7 @@ public void testPrecisePublishRateLimiterAcquire() throws Exception { renewTopicPublishRateLimiterOnByteMethod.setAccessible(true); // running tryAcquire in order to lazyInit the renewTask - precisPublishLimiter.tryAcquire(1, 10); + precisePublishLimiter.tryAcquire(1, 10); Field onMessageRenewTaskField = topicPublishRateLimiterOnMessage.getClass().getDeclaredField("renewTask"); Field onByteRenewTaskField = topicPublishRateLimiterOnByte.getClass().getDeclaredField("renewTask"); @@ -129,30 +131,30 @@ public void testPrecisePublishRateLimiterAcquire() throws Exception { renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); // tryAcquire not exceeded - assertTrue(precisPublishLimiter.tryAcquire(1, 10)); + assertTrue(precisePublishLimiter.tryAcquire(1, 10)); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); // tryAcquire numOfMessages exceeded - assertFalse(precisPublishLimiter.tryAcquire(11, 100)); + assertFalse(precisePublishLimiter.tryAcquire(11, 100)); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); // tryAcquire msgSizeInBytes exceeded - assertFalse(precisPublishLimiter.tryAcquire(10, 101)); + assertFalse(precisePublishLimiter.tryAcquire(10, 101)); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); // tryAcquire exceeded exactly - assertFalse(precisPublishLimiter.tryAcquire(10, 100)); + assertFalse(precisePublishLimiter.tryAcquire(10, 100)); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); renewTopicPublishRateLimiterOnMessageMethod.invoke(topicPublishRateLimiterOnMessage); renewTopicPublishRateLimiterOnByteMethod.invoke(topicPublishRateLimiterOnByte); // tryAcquire not exceeded - assertTrue(precisPublishLimiter.tryAcquire(9, 99)); + assertTrue(precisePublishLimiter.tryAcquire(9, 99)); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java index 3982f44905d6b..fe519827be74a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java @@ -24,12 +24,15 @@ import static org.testng.Assert.assertNotNull; import static 
org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import com.google.common.collect.Sets; import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -39,20 +42,29 @@ import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.ReplicatedSubscriptionsController; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.policies.data.PartitionedTopicStats; +import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; /** @@ -154,6 +166,93 @@ public void testReplicatedSubscriptionAcrossTwoRegions() throws Exception { "messages don't match."); } + @Test + public void testReplicatedSubscribeAndSwitchToStandbyCluster() throws Exception { + final String namespace = BrokerTestUtil.newUniqueName("pulsar/ns_"); + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + namespace + "/tp_"); + final String subscriptionName = "s1"; + final boolean isReplicatedSubscription = true; + final int messagesCount = 20; + final LinkedHashSet sentMessages = new LinkedHashSet<>(); + final Set receivedMessages = Collections.synchronizedSet(new LinkedHashSet<>()); + admin1.namespaces().createNamespace(namespace); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + admin1.topics().createNonPartitionedTopic(topicName); + admin1.topics().createSubscription(topicName, subscriptionName, MessageId.earliest, isReplicatedSubscription); + final PersistentTopic topic1 = + (PersistentTopic) pulsar1.getBrokerService().getTopic(topicName, false).join().get(); + + // Send messages + // Wait for the topic created on the cluster2. + // Wait for the snapshot created. 
+ final PulsarClient client1 = PulsarClient.builder().serviceUrl(url1.toString()).build(); + Producer producer1 = client1.newProducer(Schema.STRING).topic(topicName).enableBatching(false).create(); + Consumer consumer1 = client1.newConsumer(Schema.STRING).topic(topicName) + .subscriptionName(subscriptionName).replicateSubscriptionState(isReplicatedSubscription).subscribe(); + for (int i = 0; i < messagesCount / 2; i++) { + String msg = i + ""; + producer1.send(msg); + sentMessages.add(msg); + } + Awaitility.await().untilAsserted(() -> { + ConcurrentOpenHashMap replicators = topic1.getReplicators(); + assertTrue(replicators != null && replicators.size() == 1, "Replicator should started"); + assertTrue(replicators.values().iterator().next().isConnected(), "Replicator should be connected"); + assertTrue(topic1.getReplicatedSubscriptionController().get().getLastCompletedSnapshotId().isPresent(), + "One snapshot should be finished"); + }); + final PersistentTopic topic2 = + (PersistentTopic) pulsar2.getBrokerService().getTopic(topicName, false).join().get(); + Awaitility.await().untilAsserted(() -> { + assertTrue(topic2.getReplicatedSubscriptionController().isPresent(), + "Replicated subscription controller should created"); + }); + for (int i = messagesCount / 2; i < messagesCount; i++) { + String msg = i + ""; + producer1.send(msg); + sentMessages.add(msg); + } + + // Consume half messages and wait the subscription created on the cluster2. + for (int i = 0; i < messagesCount / 2; i++){ + Message message = consumer1.receive(2, TimeUnit.SECONDS); + if (message == null) { + fail("Should not receive null."); + } + receivedMessages.add(message.getValue()); + consumer1.acknowledge(message); + } + Awaitility.await().untilAsserted(() -> { + assertNotNull(topic2.getSubscriptions().get(subscriptionName), "Subscription should created"); + }); + + // Switch client to cluster2. + // Since the cluster1 was not crash, all messages will be replicated to the cluster2. + consumer1.close(); + final PulsarClient client2 = PulsarClient.builder().serviceUrl(url2.toString()).build(); + final Consumer consumer2 = client2.newConsumer(Schema.AUTO_CONSUME()).topic(topicName) + .subscriptionName(subscriptionName).replicateSubscriptionState(isReplicatedSubscription).subscribe(); + + // Verify all messages will be consumed. 
+ Awaitility.await().untilAsserted(() -> { + while (true) { + Message message = consumer2.receive(2, TimeUnit.SECONDS); + if (message != null) { + receivedMessages.add(message.getValue().toString()); + consumer2.acknowledge(message); + } else { + break; + } + } + assertEquals(receivedMessages.size(), sentMessages.size()); + }); + + consumer2.close(); + producer1.close(); + client1.close(); + client2.close(); + } + /** * If there's no traffic, the snapshot creation should stop and then resume when traffic comes back */ @@ -530,6 +629,27 @@ public void testReplicatedSubscriptionRestApi2() throws Exception { String.format("numReceivedMessages2 (%d) should be less than %d", numReceivedMessages2, numMessages)); } + @Test(timeOut = 30000) + public void testReplicatedSubscriptionRestApi3() throws Exception { + final String namespace = BrokerTestUtil.newUniqueName("geo/replicatedsubscription"); + final String topicName = "persistent://" + namespace + "/topic-rest-api3"; + final String subName = "sub"; + admin4.tenants().createTenant("geo", + new TenantInfoImpl(Sets.newHashSet("appid1", "appid4"), Sets.newHashSet(cluster1, cluster4))); + admin4.namespaces().createNamespace(namespace); + admin4.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(cluster1, cluster4)); + admin4.topics().createPartitionedTopic(topicName, 2); + + @Cleanup + final PulsarClient client4 = PulsarClient.builder().serviceUrl(url4.toString()) + .statsInterval(0, TimeUnit.SECONDS).build(); + + Consumer consumer4 = client4.newConsumer().topic(topicName).subscriptionName(subName).subscribe(); + Assert.expectThrows(PulsarAdminException.class, () -> + admin4.topics().setReplicatedSubscriptionStatus(topicName, subName, true)); + consumer4.close(); + } + /** * Tests replicated subscriptions when replicator producer is closed */ @@ -613,6 +733,273 @@ public void testReplicatedSubscriptionWhenReplicatorProducerIsClosed() throws Ex Awaitility.await().untilAsserted(() -> assertNotNull(topic2.getSubscription(subscriptionName))); } + @DataProvider(name = "isTopicPolicyEnabled") + private Object[][] isTopicPolicyEnabled() { + // Todo: fix replication can not be enabled at topic level. + return new Object[][] { { Boolean.FALSE } }; + } + + /** + * Test the replication subscription can work normal in the following cases: + *
+ * 1. No snapshot or marker is written into the original topic when the topic does not have a remote cluster configured. {topic1} + * 1. Publish messages to the topic and then wait a moment; + * the backlog does not increase after publishing completes. + * 2. Acknowledge the messages; the last confirmed entry does not change. + * 2. A snapshot and a marker are written after the topic configures a remote cluster. {topic2} + * 1. Publish messages to the topic. After publishing completes, the backlog of the topic keeps increasing. + * 2. Wait for the snapshot to complete; the backlog stops changing. + * 3. Publish more messages and wait for another snapshot to complete. + * 4. Ack messages to move the mark-delete position past the position recorded in the first snapshot. + * 5. Check that a new entry (a marker) is appended to the original topic. + * 3. Writing snapshots and markers stops after the remote cluster is removed from the topic. {topic2} + * Similar to step 1. + *
+ */ + @Test(dataProvider = "isTopicPolicyEnabled") + public void testWriteMarkerTaskOfReplicateSubscriptions(boolean isTopicPolicyEnabled) throws Exception { + // 1. Prepare resource and use proper configuration. + String namespace = BrokerTestUtil.newUniqueName("pulsar/testReplicateSubBackLog"); + String topic1 = "persistent://" + namespace + "/replication-enable"; + String topic2 = "persistent://" + namespace + "/replication-disable"; + String subName = "sub"; + + admin1.namespaces().createNamespace(namespace); + pulsar1.getConfiguration().setTopicLevelPoliciesEnabled(isTopicPolicyEnabled); + pulsar1.getConfiguration().setReplicationPolicyCheckDurationSeconds(1); + pulsar1.getConfiguration().setReplicatedSubscriptionsSnapshotFrequencyMillis(1000); + // 2. Build Producer and Consumer. + @Cleanup + PulsarClient client1 = PulsarClient.builder().serviceUrl(url1.toString()) + .statsInterval(0, TimeUnit.SECONDS) + .build(); + @Cleanup + Consumer consumer1 = client1.newConsumer() + .topic(topic1) + .subscriptionName(subName) + .ackTimeout(5, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .replicateSubscriptionState(true) + .subscribe(); + @Cleanup + Producer producer1 = client1.newProducer() + .topic(topic1) + .create(); + // 3. Test replication subscription work as expected. + // Test case 1: disable replication, backlog will not increase. + testReplicatedSubscriptionWhenDisableReplication(producer1, consumer1, topic1); + + // Test case 2: enable replication, mark and snapshot work as expected. + if (isTopicPolicyEnabled) { + admin1.topics().createNonPartitionedTopic(topic2); + admin1.topics().setReplicationClusters(topic2, List.of("r1", "r2")); + } else { + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + } + @Cleanup + Consumer consumer2 = client1.newConsumer() + .topic(topic2) + .subscriptionName(subName) + .ackTimeout(5, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .replicateSubscriptionState(true) + .subscribe(); + @Cleanup + Producer producer2 = client1.newProducer() + .topic(topic2) + .create(); + testReplicatedSubscriptionWhenEnableReplication(producer2, consumer2, topic2); + + // Test case 3: enable replication, mark and snapshot work as expected. + if (isTopicPolicyEnabled) { + admin1.topics().setReplicationClusters(topic2, List.of("r1")); + } else { + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1")); + } + testReplicatedSubscriptionWhenDisableReplication(producer2, consumer2, topic2); + // 4. Clear resource. 
+ pulsar1.getConfiguration().setForceDeleteNamespaceAllowed(true); + admin1.namespaces().deleteNamespace(namespace, true); + pulsar1.getConfiguration().setForceDeleteNamespaceAllowed(false); + } + + @Test + public void testReplicatedSubscriptionWithCompaction() throws Exception { + final String namespace = BrokerTestUtil.newUniqueName("pulsar/replicatedsubscription"); + final String topicName = "persistent://" + namespace + "/testReplicatedSubscriptionWithCompaction"; + final String subName = "sub"; + + admin1.namespaces().createNamespace(namespace); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + admin1.topics().createNonPartitionedTopic(topicName); + admin1.topicPolicies().setCompactionThreshold(topicName, 100 * 1024 * 1024L); + + @Cleanup final PulsarClient client = PulsarClient.builder().serviceUrl(url1.toString()) + .statsInterval(0, TimeUnit.SECONDS).build(); + + Producer producer = client.newProducer(Schema.STRING).topic(topicName).create(); + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K1").value("V2").send(); + producer.close(); + + createReplicatedSubscription(client, topicName, subName, true); + Awaitility.await().untilAsserted(() -> { + Map status = admin1.topics().getReplicatedSubscriptionStatus(topicName, subName); + assertTrue(status.get(topicName)); + }); + + Awaitility.await().untilAsserted(() -> { + PersistentTopic t1 = (PersistentTopic) pulsar1.getBrokerService() + .getTopic(topicName, false).get().get(); + ReplicatedSubscriptionsController rsc1 = t1.getReplicatedSubscriptionController().get(); + Assert.assertTrue(rsc1.getLastCompletedSnapshotId().isPresent()); + assertEquals(t1.getPendingWriteOps().get(), 0L); + }); + + admin1.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin1.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = client.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName("sub2") + .subscriptionType(SubscriptionType.Exclusive) + .readCompacted(true) + .subscribe(); + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V2")); + } + + /** + * Disable replication subscription. + * Test scheduled task case. + * 1. Send three messages |1:0|1:1|1:2|. + * 2. Get topic backlog, as backlog1. + * 3. Wait a moment. + * 4. Get the topic backlog again, the backlog will not increase. + * Test acknowledge messages case. + * 1. Get the last confirm entry, as LAC1. + * 2. Acknowledge these messages |1:0|1:1|. + * 3. wait a moment. + * 4. Get the last confirm entry, as LAC2. LAC1 is equal to LAC2. + * Clear environment. + * 1. Ack all the retained messages. |1:2| + * 2. Wait for the backlog to return to zero. + */ + private void testReplicatedSubscriptionWhenDisableReplication(Producer producer, Consumer consumer, + String topic) throws Exception { + final int messageSum = 3; + // Test scheduled task case. + for (int i = 0; i < messageSum; i++) { + producer.newMessage().send(); + } + long backlog1 = admin1.topics().getStats(topic, false).getBacklogSize(); + Thread.sleep(3000); + long backlog2 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog1, backlog2); + // Test acknowledge messages case. 
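+ // Acknowledging while replication is disabled should not write any marker, so the last confirmed entry must stay unchanged.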
+ String lastConfirmEntry1 = admin1.topics().getInternalStats(topic).lastConfirmedEntry; + for (int i = 0; i < messageSum - 1; i++) { + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + } + Awaitility.await().untilAsserted(() -> { + String lastConfirmEntry2 = admin1.topics().getInternalStats(topic).lastConfirmedEntry; + assertEquals(lastConfirmEntry1, lastConfirmEntry2); + }); + // Clear environment. + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + Awaitility.await().untilAsserted(() -> { + long backlog4 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog4, 0); + }); + } + + /** + * Enable replication subscription. + * Test scheduled task case. + * 1. Wait replicator connected. + * 2. Send three messages |1:0|1:1|1:2|. + * 3. Get topic backlog, as backlog1. + * 4. Wait a moment. + * 5. Get the topic backlog again, as backlog2. The backlog2 is bigger than backlog1. |1:0|1:1|1:2|mark|. + * 6. Wait the snapshot complete. + * Test acknowledge messages case. + * 1. Write messages and wait another snapshot complete. |1:0|1:1|1:2|mark|1:3|1:4|1:5|mark| + * 2. Ack message |1:0|1:1|1:2|1:3|1:4|. + * 3. Get last confirm entry, as LAC1. + * 2. Wait a moment. + * 3. Get Last confirm entry, as LAC2. LAC2 different to LAC1. |1:5|mark|mark| + * Clear environment. + * 1. Ack all the retained message |1:5|. + * 2. Wait for the backlog to return to zero. + */ + private void testReplicatedSubscriptionWhenEnableReplication(Producer producer, Consumer consumer, + String topic) throws Exception { + final int messageSum = 3; + Awaitility.await().untilAsserted(() -> { + List keys = pulsar1.getBrokerService() + .getTopic(topic, false).get().get() + .getReplicators().keys(); + assertEquals(keys.size(), 1); + assertTrue(pulsar1.getBrokerService() + .getTopic(topic, false).get().get() + .getReplicators().get(keys.get(0)).isConnected()); + }); + // Test scheduled task case. + sendMessageAndWaitSnapshotComplete(producer, topic, messageSum); + // Test acknowledge messages case. + // After snapshot write completely, acknowledging message to move the mark delete position + // after the position recorded in the snapshot will trigger to write a new marker. + sendMessageAndWaitSnapshotComplete(producer, topic, messageSum); + String lastConfirmedEntry3 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + for (int i = 0; i < messageSum * 2 - 1; i++) { + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + } + Awaitility.await().untilAsserted(() -> { + String lastConfirmedEntry4 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + assertNotEquals(lastConfirmedEntry3, lastConfirmedEntry4); + }); + // Clear environment. + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + Awaitility.await().untilAsserted(() -> { + long backlog4 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog4, 0); + }); + } + + private void sendMessageAndWaitSnapshotComplete(Producer producer, String topic, + int messageSum) throws Exception { + for (int i = 0; i < messageSum; i++) { + producer.newMessage().send(); + } + long backlog1 = admin1.topics().getStats(topic, false).getBacklogSize(); + Awaitility.await().untilAsserted(() -> { + long backlog2 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertTrue(backlog2 > backlog1); + }); + // Wait snapshot write completely, stop writing marker into topic. 
+ Awaitility.await().untilAsserted(() -> { + String lastConfirmedEntry1 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + PersistentTopicInternalStats persistentTopicInternalStats = admin1.topics().getInternalStats(topic, false); + Thread.sleep(1000); + String lastConfirmedEntry2 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + assertEquals(lastConfirmedEntry1, lastConfirmedEntry2); + }); + } + void publishMessages(Producer producer, int startIndex, int numMessages, Set sentMessages) throws PulsarClientException { for (int i = startIndex; i < startIndex + numMessages; i++) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java index 901451c022bfe..3fa3a8dfc0f15 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java @@ -21,6 +21,7 @@ import static org.apache.pulsar.broker.BrokerTestUtil.newUniqueName; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.retryStrategically; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; @@ -51,8 +52,10 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import lombok.Cleanup; +import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCursorCallback; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -60,7 +63,6 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException.CursorAlreadyClosedException; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; -import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.State; import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; @@ -81,6 +83,7 @@ import org.apache.pulsar.client.api.RawMessage; import org.apache.pulsar.client.api.RawReader; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.TypedMessageBuilder; import org.apache.pulsar.client.api.schema.GenericRecord; @@ -236,16 +239,14 @@ public void activeBrokerParse() throws Exception { pulsar1.getConfiguration().setAuthorizationEnabled(true); //init clusterData - String cluster2ServiceUrls = String.format("%s,localhost:1234,localhost:5678,localhost:5677,localhost:5676", - pulsar2.getWebServiceAddress()); - ClusterData cluster2Data = ClusterData.builder().serviceUrl(cluster2ServiceUrls).build(); + ClusterData cluster2Data = ClusterData.builder().serviceUrl(pulsar2.getWebServiceAddress()).build(); String cluster2 = "activeCLuster2"; admin2.clusters().createCluster(cluster2, cluster2Data); Awaitility.await().until(() -> admin2.clusters().getCluster(cluster2) != null); List list = admin1.brokers().getActiveBrokers(cluster2); - assertEquals(list.get(0), url2.toString().replace("http://", "")); + 
assertEquals(list.get(0), pulsar2.getBrokerId()); //restore configuration pulsar1.getConfiguration().setAuthorizationEnabled(false); } @@ -1645,20 +1646,41 @@ public void testReplicatorWithFailedAck() throws Exception { log.info("--- Starting ReplicatorTest::testReplication ---"); String namespace = "pulsar/global/ns2"; - admin1.namespaces().createNamespace(namespace); - admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + admin1.namespaces().createNamespace(namespace, Sets.newHashSet("r1")); final TopicName dest = TopicName .get(BrokerTestUtil.newUniqueName("persistent://" + namespace + "/ackFailedTopic")); @Cleanup MessageProducer producer1 = new MessageProducer(url1, dest); - log.info("--- Starting producer --- " + url1); + PersistentTopic topic = (PersistentTopic) pulsar1.getBrokerService().getTopic(dest.toString(), false) + .getNow(null).get(); + final ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) topic.getManagedLedger(); + final ManagedCursorImpl cursor = (ManagedCursorImpl) managedLedger.openCursor("pulsar.repl.r2"); + final ManagedCursorImpl spyCursor = spy(cursor); + managedLedger.getCursors().removeCursor(cursor.getName()); + managedLedger.getCursors().add(spyCursor, PositionImpl.EARLIEST); + AtomicBoolean isMakeAckFail = new AtomicBoolean(false); + doAnswer(invocation -> { + Position pos = (Position) invocation.getArguments()[0]; + AsyncCallbacks.DeleteCallback cb = (AsyncCallbacks.DeleteCallback) invocation.getArguments()[1]; + Object ctx = invocation.getArguments()[2]; + if (isMakeAckFail.get()) { + log.info("async-delete {} will be failed", pos); + cb.deleteFailed(new ManagedLedgerException("mocked error"), ctx); + } else { + log.info("async-delete {} will success", pos); + cursor.asyncDelete(pos, cb, ctx); + } + return null; + }).when(spyCursor).asyncDelete(Mockito.any(Position.class), Mockito.any(AsyncCallbacks.DeleteCallback.class), + Mockito.any()); + + log.info("--- Starting producer --- " + url1); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); // Produce from cluster1 and consume from the rest producer1.produce(2); - PersistentTopic topic = (PersistentTopic) pulsar1.getBrokerService().getTopic(dest.toString(), false) - .getNow(null).get(); MessageIdImpl lastMessageId = (MessageIdImpl) topic.getLastMessageId().get(); Position lastPosition = PositionImpl.get(lastMessageId.getLedgerId(), lastMessageId.getEntryId()); ConcurrentOpenHashMap replicators = topic.getReplicators(); @@ -1667,25 +1689,19 @@ public void testReplicatorWithFailedAck() throws Exception { Awaitility.await().pollInterval(1, TimeUnit.SECONDS).timeout(30, TimeUnit.SECONDS) .untilAsserted(() -> assertEquals(org.apache.pulsar.broker.service.AbstractReplicator.State.Started, replicator.getState())); - assertEquals(replicator.getState(), org.apache.pulsar.broker.service.AbstractReplicator.State.Started); - ManagedCursorImpl cursor = (ManagedCursorImpl) replicator.getCursor(); // Make sure all the data has replicated to the remote cluster before close the cursor. Awaitility.await().untilAsserted(() -> assertEquals(cursor.getMarkDeletedPosition(), lastPosition)); - cursor.setState(State.Closed); - - Field field = ManagedCursorImpl.class.getDeclaredField("state"); - field.setAccessible(true); - field.set(cursor, State.Closed); + isMakeAckFail.set(true); producer1.produce(10); // The cursor is closed, so the mark delete position will not move forward. 
assertEquals(cursor.getMarkDeletedPosition(), lastPosition); - field.set(cursor, State.Open); + isMakeAckFail.set(false); Awaitility.await().timeout(30, TimeUnit.SECONDS).until( () -> { @@ -1756,4 +1772,74 @@ public void testReplicatorProducerNotExceed() throws Exception { Assert.assertThrows(PulsarClientException.ProducerBusyException.class, () -> new MessageProducer(url2, dest2)); } + + @Test + public void testReplicatorWithTTL() throws Exception { + log.info("--- Starting ReplicatorTest::testReplicatorWithTTL ---"); + + final String cluster1 = pulsar1.getConfig().getClusterName(); + final String cluster2 = pulsar2.getConfig().getClusterName(); + final String namespace = BrokerTestUtil.newUniqueName("pulsar/ns"); + final TopicName topic = TopicName + .get(BrokerTestUtil.newUniqueName("persistent://" + namespace + "/testReplicatorWithTTL")); + admin1.namespaces().createNamespace(namespace, Sets.newHashSet(cluster1, cluster2)); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(cluster1, cluster2)); + admin1.topics().createNonPartitionedTopic(topic.toString()); + admin1.topicPolicies().setMessageTTL(topic.toString(), 1); + + @Cleanup + PulsarClient client1 = PulsarClient.builder().serviceUrl(url1.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + + @Cleanup + Producer persistentProducer1 = client1.newProducer().topic(topic.toString()).create(); + persistentProducer1.send("V1".getBytes()); + + waitReplicateFinish(topic, admin1); + + PersistentTopic persistentTopic = + (PersistentTopic) pulsar1.getBrokerService().getTopicReference(topic.toString()).get(); + persistentTopic.getReplicators().forEach((cluster, replicator) -> { + PersistentReplicator persistentReplicator = (PersistentReplicator) replicator; + // Pause replicator + persistentReplicator.disconnect(); + }); + + persistentProducer1.send("V2".getBytes()); + persistentProducer1.send("V3".getBytes()); + + Thread.sleep(1000); + + admin1.topics().expireMessagesForAllSubscriptions(topic.toString(), 1); + + persistentTopic.getReplicators().forEach((cluster, replicator) -> { + PersistentReplicator persistentReplicator = (PersistentReplicator) replicator; + persistentReplicator.startProducer(); + }); + + waitReplicateFinish(topic, admin1); + + persistentProducer1.send("V4".getBytes()); + + waitReplicateFinish(topic, admin1); + + @Cleanup + PulsarClient client2 = PulsarClient.builder().serviceUrl(url2.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + + @Cleanup + Consumer consumer = client2.newConsumer().topic(topic.toString()) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscriptionName("sub").subscribe(); + + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + result.add(new String(receive.getValue())); + } + + assertEquals(result, Lists.newArrayList("V1", "V2", "V3", "V4")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java index b83e8ac9d2dbf..beb1a3c4b9309 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java @@ -82,6 +82,13 @@ public abstract class ReplicatorTestBase extends TestRetrySupport { PulsarAdmin admin3; LocalBookkeeperEnsemble bkEnsemble3; + URL url4; + URL urlTls4; + 
ServiceConfiguration config4 = new ServiceConfiguration(); + PulsarService pulsar4; + PulsarAdmin admin4; + LocalBookkeeperEnsemble bkEnsemble4; + ZookeeperServerTest globalZkS; ExecutorService executor; @@ -111,6 +118,7 @@ public abstract class ReplicatorTestBase extends TestRetrySupport { protected final String cluster1 = "r1"; protected final String cluster2 = "r2"; protected final String cluster3 = "r3"; + protected final String cluster4 = "r4"; // Default frequency public int getBrokerServicePurgeInactiveFrequency() { @@ -178,6 +186,21 @@ protected void setup() throws Exception { urlTls3 = new URL(pulsar3.getWebServiceAddressTls()); admin3 = PulsarAdmin.builder().serviceHttpUrl(url3.toString()).build(); + // Start region 4 + + // Start zk & bks + bkEnsemble4 = new LocalBookkeeperEnsemble(3, 0, () -> 0); + bkEnsemble4.start(); + + setConfig4DefaultValue(); + pulsar4 = new PulsarService(config4); + pulsar4.start(); + + url4 = new URL(pulsar4.getWebServiceAddress()); + urlTls4 = new URL(pulsar4.getWebServiceAddressTls()); + admin4 = PulsarAdmin.builder().serviceHttpUrl(url4.toString()).build(); + + // Provision the global namespace admin1.clusters().createCluster(cluster1, ClusterData.builder() .serviceUrl(url1.toString()) @@ -230,6 +253,23 @@ protected void setup() throws Exception { .brokerClientTlsTrustStorePassword(keyStorePassword) .brokerClientTlsTrustStoreType(keyStoreType) .build()); + admin4.clusters().createCluster(cluster4, ClusterData.builder() + .serviceUrl(url4.toString()) + .serviceUrlTls(urlTls4.toString()) + .brokerServiceUrl(pulsar4.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar4.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(true) + .brokerClientCertificateFilePath(clientCertFilePath) + .brokerClientKeyFilePath(clientKeyFilePath) + .brokerClientTrustCertsFilePath(caCertFilePath) + .brokerClientTlsEnabledWithKeyStore(tlsWithKeyStore) + .brokerClientTlsKeyStore(clientKeyStorePath) + .brokerClientTlsKeyStorePassword(keyStorePassword) + .brokerClientTlsKeyStoreType(keyStoreType) + .brokerClientTlsTrustStore(clientTrustStorePath) + .brokerClientTlsTrustStorePassword(keyStorePassword) + .brokerClientTlsTrustStoreType(keyStoreType) + .build()); admin1.tenants().createTenant("pulsar", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2", "appid3"), Sets.newHashSet("r1", "r2", "r3"))); @@ -257,7 +297,7 @@ protected void setup() throws Exception { } public void setConfig3DefaultValue() { - setConfigDefaults(config3, "r3", bkEnsemble3); + setConfigDefaults(config3, cluster3, bkEnsemble3); config3.setTlsEnabled(true); } @@ -269,6 +309,11 @@ public void setConfig2DefaultValue() { setConfigDefaults(config2, cluster2, bkEnsemble2); } + public void setConfig4DefaultValue() { + setConfigDefaults(config4, cluster4, bkEnsemble4); + config4.setEnableReplicatedSubscriptions(false); + } + private void setConfigDefaults(ServiceConfiguration config, String clusterName, LocalBookkeeperEnsemble bookkeeperEnsemble) { config.setClusterName(clusterName); @@ -316,6 +361,11 @@ public void resetConfig3() { setConfig3DefaultValue(); } + public void resetConfig4() { + config4 = new ServiceConfiguration(); + setConfig4DefaultValue(); + } + private int inSec(int time, TimeUnit unit) { return (int) TimeUnit.SECONDS.convert(time, unit); } @@ -332,7 +382,11 @@ protected void cleanup() throws Exception { admin1.close(); admin2.close(); admin3.close(); + admin4.close(); + if (pulsar4 != null) { + pulsar4.close(); + } if (pulsar3 != null) { pulsar3.close(); } @@ -346,11 +400,13 @@ protected void 
cleanup() throws Exception { bkEnsemble1.stop(); bkEnsemble2.stop(); bkEnsemble3.stop(); + bkEnsemble4.stop(); globalZkS.stop(); resetConfig1(); resetConfig2(); resetConfig3(); + resetConfig4(); } static class MessageProducer implements AutoCloseable { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java index c3bab634a42c1..0f0440d24dde7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java @@ -45,8 +45,11 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandler; +import io.netty.channel.DefaultChannelId; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.vertx.core.impl.ConcurrentHashSet; +import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; @@ -60,10 +63,16 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.CloseCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCursorCallback; @@ -74,6 +83,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.lang3.mutable.MutableInt; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockAlwaysExpiredAuthenticationProvider; @@ -93,7 +103,9 @@ import org.apache.pulsar.broker.service.ServerCnx.State; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.utils.ClientChannelHelper; +import org.apache.pulsar.client.api.ProducerAccessMode; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.util.ConsumerName; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.api.proto.AuthMethod; import org.apache.pulsar.common.api.proto.BaseCommand; @@ -113,6 +125,7 @@ import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespaceResponse; import org.apache.pulsar.common.api.proto.CommandLookupTopicResponse; import org.apache.pulsar.common.api.proto.CommandPartitionedTopicMetadataResponse; +import org.apache.pulsar.common.api.proto.CommandPing; import org.apache.pulsar.common.api.proto.CommandProducerSuccess; import org.apache.pulsar.common.api.proto.CommandSendError; import org.apache.pulsar.common.api.proto.CommandSendReceipt; @@ -135,6 +148,7 @@ import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Commands.ChecksumType; import org.apache.pulsar.common.protocol.PulsarHandler; +import org.apache.pulsar.common.protocol.schema.EmptyVersion; 
import org.apache.pulsar.common.topics.TopicList; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; @@ -149,6 +163,7 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; +@Slf4j @SuppressWarnings("unchecked") @Test(groups = "broker") public class ServerCnxTest { @@ -184,10 +199,12 @@ public class ServerCnxTest { private ManagedLedger ledgerMock; private ManagedCursor cursorMock; + private ConcurrentHashSet channelsStoppedAnswerHealthCheck = new ConcurrentHashSet<>(); @BeforeMethod(alwaysRun = true) public void setup() throws Exception { + channelsStoppedAnswerHealthCheck.clear(); svcConfig = new ServiceConfiguration(); svcConfig.setBrokerShutdownTimeoutMs(0L); svcConfig.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -496,6 +513,41 @@ public void testConnectCommandWithPassingOriginalAuthData() throws Exception { channel.finish(); } + @Test(timeOut = 30000) + public void testConnectCommandWithPassingOriginalAuthDataAndSetAnonymousUserRole() throws Exception { + AuthenticationService authenticationService = mock(AuthenticationService.class); + AuthenticationProvider authenticationProvider = new MockAuthenticationProvider(); + String authMethodName = authenticationProvider.getAuthMethodName(); + + String anonymousUserRole = "admin"; + when(brokerService.getAuthenticationService()).thenReturn(authenticationService); + when(authenticationService.getAuthenticationProvider(authMethodName)).thenReturn(authenticationProvider); + when(authenticationService.getAnonymousUserRole()).thenReturn(Optional.of(anonymousUserRole)); + svcConfig.setAuthenticationEnabled(true); + svcConfig.setAuthenticateOriginalAuthData(true); + svcConfig.setProxyRoles(Collections.singleton("pass.proxy")); + svcConfig.setAnonymousUserRole(anonymousUserRole); + + resetChannel(); + assertTrue(channel.isActive()); + assertEquals(serverCnx.getState(), State.Start); + + // When both the proxy and the broker set the anonymousUserRole option, + // the proxy will use anonymousUserRole to delegate the client's role when connecting. + ByteBuf clientCommand = Commands.newConnect(authMethodName, "pass.proxy", 1, null, + null, anonymousUserRole, null, null); + channel.writeInbound(clientCommand); + + Object response1 = getResponse(); + assertTrue(response1 instanceof CommandConnected); + assertEquals(serverCnx.getState(), State.Connected); + assertEquals(serverCnx.getAuthRole(), anonymousUserRole); + assertEquals(serverCnx.getPrincipal(), anonymousUserRole); + assertEquals(serverCnx.getOriginalPrincipal(), anonymousUserRole); + assertTrue(serverCnx.isActive()); + channel.finish(); + } + @Test(timeOut = 30000) public void testConnectCommandWithPassingOriginalPrincipal() throws Exception { AuthenticationService authenticationService = mock(AuthenticationService.class); @@ -927,6 +979,324 @@ public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws E })); } + @Test + public void testDuplicateProducer() throws Exception { + final String tName = successTopicName; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + setChannelConnected(); + + // The producer registers using the first connection.
+ ByteBuf cmdProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel.writeInbound(cmdProducer1); + assertTrue(getResponse() instanceof CommandProducerSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).get(); + assertNotNull(topicRef); + assertEquals(topicRef.getProducers().size(), 1); + + // Verify the second producer will be rejected because the previous one is still active. + // Try once every 500 ms, 10 times in total; all requests should fail. + ClientChannel channel2 = new ClientChannel(); + BackGroundExecutor backGroundExecutor1 = startBackgroundExecutorForEmbeddedChannel(channel); + BackGroundExecutor autoResponseForHeartBeat = autoResponseForHeartBeat(channel, clientChannelHelper); + BackGroundExecutor backGroundExecutor2 = startBackgroundExecutorForEmbeddedChannel(channel2.channel); + setChannelConnected(channel2.serverCnx); + + for (int i = 0; i < 10; i++) { + ByteBuf cmdProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel2.channel.writeInbound(cmdProducer2); + Object response2 = getResponse(channel2.channel, channel2.clientChannelHelper); + assertTrue(response2 instanceof CommandError); + assertEquals(topicRef.getProducers().size(), 1); + assertTrue(channel.isActive()); + Thread.sleep(500); + } + + // cleanup. + autoResponseForHeartBeat.close(); + backGroundExecutor1.close(); + backGroundExecutor2.close(); + channel.finish(); + channel2.close(); + } + + @Test + public void testProducerChangeSocket() throws Exception { + final String tName = successTopicName; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + setChannelConnected(); + + // The producer registers using the first connection. + ByteBuf cmdProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel.writeInbound(cmdProducer1); + assertTrue(getResponse() instanceof CommandProducerSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).get(); + assertNotNull(topicRef); + assertEquals(topicRef.getProducers().size(), 1); + + // Verify that a second producer using a new connection will override the producer that is using a stopped channel. + channelsStoppedAnswerHealthCheck.add(channel); + ClientChannel channel2 = new ClientChannel(); + BackGroundExecutor backGroundExecutor1 = startBackgroundExecutorForEmbeddedChannel(channel); + BackGroundExecutor backGroundExecutor2 = startBackgroundExecutorForEmbeddedChannel(channel2.channel); + setChannelConnected(channel2.serverCnx); + + ByteBuf cmdProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel2.channel.writeInbound(cmdProducer2); + Object response2 = getResponse(channel2.channel, channel2.clientChannelHelper); + assertTrue(response2 instanceof CommandProducerSuccess); + assertEquals(topicRef.getProducers().size(), 1); + + // cleanup.
+ channelsStoppedAnswerHealthCheck.clear(); + backGroundExecutor1.close(); + backGroundExecutor2.close(); + channel.finish(); + channel2.close(); + } + + @Test + public void testHandleConsumerAfterClientChannelInactive() throws Exception { + final String tName = successTopicName; + final long consumerId = 1; + final MutableInt requestId = new MutableInt(1); + final String sName = successSubName; + final String cName1 = ConsumerName.generateRandomName(); + final String cName2 = ConsumerName.generateRandomName(); + resetChannel(); + setChannelConnected(); + + // The consumer registers using the first connection. + ByteBuf cmdSubscribe1 = Commands.newSubscribe(tName, sName, consumerId, requestId.incrementAndGet(), + SubType.Exclusive, 0, cName1, 0); + channel.writeInbound(cmdSubscribe1); + assertTrue(getResponse() instanceof CommandSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).get(); + assertNotNull(topicRef); + assertNotNull(topicRef.getSubscription(sName).getConsumers()); + assertEquals(topicRef.getSubscription(sName).getConsumers().size(), 1); + assertEquals(topicRef.getSubscription(sName).getConsumers().iterator().next().consumerName(), cName1); + + // Verify that a second consumer using a new connection will override the consumer that is using a stopped channel. + channelsStoppedAnswerHealthCheck.add(channel); + ClientChannel channel2 = new ClientChannel(); + setChannelConnected(channel2.serverCnx); + ByteBuf cmdSubscribe2 = Commands.newSubscribe(tName, sName, consumerId, requestId.incrementAndGet(), + CommandSubscribe.SubType.Exclusive, 0, cName2, 0); + channel2.channel.writeInbound(cmdSubscribe2); + BackGroundExecutor backGroundExecutor = startBackgroundExecutorForEmbeddedChannel(channel); + + assertTrue(getResponse(channel2.channel, channel2.clientChannelHelper) instanceof CommandSuccess); + assertEquals(topicRef.getSubscription(sName).getConsumers().size(), 1); + assertEquals(topicRef.getSubscription(sName).getConsumers().iterator().next().consumerName(), cName2); + backGroundExecutor.close(); + + // cleanup. + channel.finish(); + channel2.close(); + } + + @Test + public void test2ndSubFailedIfDisabledConCheck() + throws Exception { + final String tName = successTopicName; + final long consumerId = 1; + final MutableInt requestId = new MutableInt(1); + final String sName = successSubName; + final String cName1 = ConsumerName.generateRandomName(); + final String cName2 = ConsumerName.generateRandomName(); + // Disable the connection liveness check. + pulsar.getConfig().setConnectionLivenessCheckTimeoutMillis(-1); + resetChannel(); + setChannelConnected(); + + // The consumer registers using the first connection. + ByteBuf cmdSubscribe1 = Commands.newSubscribe(tName, sName, consumerId, requestId.incrementAndGet(), + SubType.Exclusive, 0, cName1, 0); + channel.writeInbound(cmdSubscribe1); + assertTrue(getResponse() instanceof CommandSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).orElse(null); + assertNotNull(topicRef); + assertNotNull(topicRef.getSubscription(sName).getConsumers()); + assertEquals(topicRef.getSubscription(sName).getConsumers().stream().map(Consumer::consumerName) + .collect(Collectors.toList()), Collections.singletonList(cName1)); + + // Verify whether a consumer using a new connection can override the consumer that is using a stopped channel.
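+ // Note: adding the old channel to channelsStoppedAnswerHealthCheck below makes getResponse/tryPeekResponse + // stop replying Pong to the broker's Ping, i.e. the first connection stops answering the liveness probe.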
+ channelsStoppedAnswerHealthCheck.add(channel); + ClientChannel channel2 = new ClientChannel(); + setChannelConnected(channel2.serverCnx); + ByteBuf cmdSubscribe2 = Commands.newSubscribe(tName, sName, consumerId, requestId.incrementAndGet(), + CommandSubscribe.SubType.Exclusive, 0, cName2, 0); + channel2.channel.writeInbound(cmdSubscribe2); + BackGroundExecutor backGroundExecutor = startBackgroundExecutorForEmbeddedChannel(channel); + + // Since the "ConnectionLiveness" feature has been disabled, the fix + introduced by https://github.com/apache/pulsar/pull/21183 does not take effect, so the client will still get an error. + Object responseOfConnection2 = getResponse(channel2.channel, channel2.clientChannelHelper); + assertTrue(responseOfConnection2 instanceof CommandError); + assertTrue(((CommandError) responseOfConnection2).getMessage() + .contains("Exclusive consumer is already connected")); + assertEquals(topicRef.getSubscription(sName).getConsumers().size(), 1); + assertEquals(topicRef.getSubscription(sName).getConsumers().iterator().next().consumerName(), cName1); + backGroundExecutor.close(); + + // cleanup. + channel.finish(); + channel2.close(); + // Reset configuration. + pulsar.getConfig().setConnectionLivenessCheckTimeoutMillis(5000); + } + + /** + * When the channel is an "EmbeddedChannel", calling channel.execute(runnable) does not run it, because there is no background thread + * to run it. + * So start a background thread to trigger the tasks in the queue. + */ + private BackGroundExecutor startBackgroundExecutorForEmbeddedChannel(final EmbeddedChannel channel) { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(() -> { + channel.runPendingTasks(); + }, 100, 100, TimeUnit.MILLISECONDS); + return new BackGroundExecutor(executor, scheduledFuture); + } + + /** + * Auto answer `Pong` for the `Cmd-Ping`. + * Note: This will result in an additional thread popping commands from the command queue, so do not call this + method if the channel needs to accept other commands.
+ */ + private BackGroundExecutor autoResponseForHeartBeat(EmbeddedChannel channel, + ClientChannelHelper clientChannelHelper) { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(() -> { + tryPeekResponse(channel, clientChannelHelper); + }, 100, 100, TimeUnit.MILLISECONDS); + return new BackGroundExecutor(executor, scheduledFuture); + } + + @AllArgsConstructor + private static class BackGroundExecutor implements Closeable { + + private ScheduledExecutorService executor; + + private ScheduledFuture scheduledFuture; + + @Override + public void close() throws IOException { + if (scheduledFuture != null) { + scheduledFuture.cancel(true); + } + executor.shutdown(); + } + } + + private class ClientChannel implements Closeable { + private ClientChannelHelper clientChannelHelper = new ClientChannelHelper(); + private ServerCnx serverCnx = new ServerCnx(pulsar); + private EmbeddedChannel channel = new EmbeddedChannel(DefaultChannelId.newInstance(), + new LengthFieldBasedFrameDecoder( + 5 * 1024 * 1024, + 0, + 4, + 0, + 4), + serverCnx); + public ClientChannel() { + serverCnx.setAuthRole(""); + } + public void close(){ + if (channel != null && channel.isActive()) { + serverCnx.close(); + channel.close(); + } + } + } + + @Test + public void testHandleProducer() throws Exception { + final String tName = "persistent://public/default/test-topic"; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + assertTrue(channel.isActive()); + assertEquals(serverCnx.getState(), State.Start); + + // connect. + ByteBuf cConnect = Commands.newConnect("none", "", null); + channel.writeInbound(cConnect); + assertEquals(serverCnx.getState(), State.Connected); + assertTrue(getResponse() instanceof CommandConnected); + + // There is an in-progress producer registration. + ByteBuf cProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture1 = new CompletableFuture(); + serverCnx.getProducers().put(producerId, existingFuture1); + channel.writeInbound(cProducer1); + Object response1 = getResponse(); + assertTrue(response1 instanceof CommandError); + CommandError error1 = (CommandError) response1; + assertEquals(error1.getError().toString(), ServerError.ServiceNotReady.toString()); + assertTrue(error1.getMessage().contains("already present on the connection")); + + // There is a failed registration. 
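+ // The pending registration future below is completed exceptionally, so the broker is expected to answer + // with ProducerBusy instead of ServiceNotReady.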
+ ByteBuf cProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture2 = new CompletableFuture(); + existingFuture2.completeExceptionally(new BrokerServiceException.ProducerBusyException("123")); + serverCnx.getProducers().put(producerId, existingFuture2); + + channel.writeInbound(cProducer2); + Object response2 = getResponse(); + assertTrue(response2 instanceof CommandError); + CommandError error2 = (CommandError) response2; + assertEquals(error2.getError().toString(), ServerError.ProducerBusy.toString()); + assertTrue(error2.getMessage().contains("already failed to register present on the connection")); + + // There is a successful registration. + ByteBuf cProducer3 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture3 = new CompletableFuture(); + org.apache.pulsar.broker.service.Producer serviceProducer = + mock(org.apache.pulsar.broker.service.Producer.class); + when(serviceProducer.getProducerName()).thenReturn(pName); + when(serviceProducer.getSchemaVersion()).thenReturn(new EmptyVersion()); + existingFuture3.complete(serviceProducer); + serverCnx.getProducers().put(producerId, existingFuture3); + + channel.writeInbound(cProducer3); + Object response3 = getResponse(); + assertTrue(response3 instanceof CommandProducerSuccess); + CommandProducerSuccess cProducerSuccess = (CommandProducerSuccess) response3; + assertEquals(cProducerSuccess.getProducerName(), pName); + + // cleanup. + channel.finish(); + } + // This test used to be in the ServerCnxAuthorizationTest class, but it was migrated here because the mocking // in that class was too extensive. There is some overlap with this test and other tests in this class. The primary // role of this test is verifying that the correct role and AuthenticationDataSource are passed to the @@ -1637,9 +2007,11 @@ public void testDuplicateConcurrentSubscribeCommand() throws Exception { "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(clientCommand); + BackGroundExecutor backGroundExecutor = startBackgroundExecutorForEmbeddedChannel(channel); + // Create producer second time clientCommand = Commands.newSubscribe(successTopicName, // - successSubName, 2 /* consumer id */, 1 /* request id */, SubType.Exclusive, 0, + successSubName, 2 /* consumer id */, 2 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(clientCommand); @@ -1649,6 +2021,9 @@ public void testDuplicateConcurrentSubscribeCommand() throws Exception { CommandError error = (CommandError) response; assertEquals(error.getError(), ServerError.ConsumerBusy); }); + + // cleanup.
+ backGroundExecutor.close(); channel.finish(); } @@ -2471,6 +2846,10 @@ protected void resetChannel() throws Exception { } protected void setChannelConnected() throws Exception { + setChannelConnected(serverCnx); + } + + protected void setChannelConnected(ServerCnx serverCnx) throws Exception { Field channelState = ServerCnx.class.getDeclaredField("state"); channelState.setAccessible(true); channelState.set(serverCnx, State.Connected); @@ -2484,13 +2863,25 @@ private void setConnectionVersion(int version) throws Exception { } protected Object getResponse() throws Exception { + return getResponse(channel, clientChannelHelper); + } + + protected Object getResponse(EmbeddedChannel channel, ClientChannelHelper clientChannelHelper) throws Exception { // Wait at most for 10s to get a response final long sleepTimeMs = 10; final long iterations = TimeUnit.SECONDS.toMillis(10) / sleepTimeMs; for (int i = 0; i < iterations; i++) { if (!channel.outboundMessages().isEmpty()) { Object outObject = channel.outboundMessages().remove(); - return clientChannelHelper.getCommand(outObject); + Object cmd = clientChannelHelper.getCommand(outObject); + if (cmd instanceof CommandPing) { + if (channelsStoppedAnswerHealthCheck.contains(channel)) { + continue; + } + channel.writeInbound(Commands.newPong()); + continue; + } + return cmd; } else { Thread.sleep(sleepTimeMs); } @@ -2499,6 +2890,26 @@ protected Object getResponse() throws Exception { throw new IOException("Failed to get response from socket within 10s"); } + protected Object tryPeekResponse(EmbeddedChannel channel, ClientChannelHelper clientChannelHelper) { + while (true) { + if (channel.outboundMessages().isEmpty()) { + return null; + } else { + Object outObject = channel.outboundMessages().peek(); + Object cmd = clientChannelHelper.getCommand(outObject); + if (cmd instanceof CommandPing) { + if (channelsStoppedAnswerHealthCheck.contains(channel)) { + continue; + } + channel.writeInbound(Commands.newPong()); + channel.outboundMessages().remove(); + continue; + } + return cmd; + } + } + } + private void setupMLAsyncCallbackMocks() { ledgerMock = mock(ManagedLedger.class); cursorMock = mock(ManagedCursor.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java index f9fc717a817af..343a18da6d8a9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java @@ -18,9 +18,6 @@ */ package org.apache.pulsar.broker.service; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; @@ -34,17 +31,20 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.reflect.FieldUtils; import 
org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.BrokerServiceException.TopicPoliciesCacheNotInitException; -import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; import org.apache.pulsar.broker.systopic.SystemTopicClient; -import org.apache.pulsar.broker.systopic.TopicPoliciesSystemTopicClient; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.BackoffBuilder; import org.apache.pulsar.common.events.PulsarEvent; @@ -53,20 +53,24 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; -import org.apache.pulsar.common.util.FutureUtil; +import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @Test(groups = "broker") +@Slf4j public class SystemTopicBasedTopicPoliciesServiceTest extends MockedPulsarServiceBaseTest { private static final String NAMESPACE1 = "system-topic/namespace-1"; private static final String NAMESPACE2 = "system-topic/namespace-2"; private static final String NAMESPACE3 = "system-topic/namespace-3"; + private static final String NAMESPACE4 = "system-topic/namespace-4"; + private static final TopicName TOPIC1 = TopicName.get("persistent", NamespaceName.get(NAMESPACE1), "topic-1"); private static final TopicName TOPIC2 = TopicName.get("persistent", NamespaceName.get(NAMESPACE1), "topic-2"); private static final TopicName TOPIC3 = TopicName.get("persistent", NamespaceName.get(NAMESPACE2), "topic-1"); @@ -135,7 +139,7 @@ public void testGetPolicy() throws ExecutionException, InterruptedException, Top // Wait for all topic policies updated. 
Awaitility.await().untilAsserted(() -> Assert.assertTrue(systemTopicBasedTopicPoliciesService - .getPoliciesCacheInit(TOPIC1.getNamespaceObject()))); + .getPoliciesCacheInit(TOPIC1.getNamespaceObject()).isDone())); // Assert broker is cache all topic policies Awaitility.await().untilAsserted(() -> @@ -298,8 +302,8 @@ private void prepareData() throws PulsarAdminException { @Test public void testGetPolicyTimeout() throws Exception { SystemTopicBasedTopicPoliciesService service = (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); - Awaitility.await().untilAsserted(() -> assertTrue(service.policyCacheInitMap.get(TOPIC1.getNamespaceObject()))); - service.policyCacheInitMap.put(TOPIC1.getNamespaceObject(), false); + Awaitility.await().untilAsserted(() -> assertTrue(service.policyCacheInitMap.get(TOPIC1.getNamespaceObject()).isDone())); + service.policyCacheInitMap.put(TOPIC1.getNamespaceObject(), new CompletableFuture<>()); long start = System.currentTimeMillis(); Backoff backoff = new BackoffBuilder() .setInitialTime(500, TimeUnit.MILLISECONDS) @@ -315,28 +319,6 @@ public void testGetPolicyTimeout() throws Exception { assertTrue("actual:" + cost, cost >= 5000 - 1000); } - @Test - public void testCreatSystemTopicClientWithRetry() throws Exception { - SystemTopicBasedTopicPoliciesService service = - spy((SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService()); - Field field = SystemTopicBasedTopicPoliciesService.class - .getDeclaredField("namespaceEventsSystemTopicFactory"); - field.setAccessible(true); - NamespaceEventsSystemTopicFactory factory = spy((NamespaceEventsSystemTopicFactory) field.get(service)); - SystemTopicClient client = mock(TopicPoliciesSystemTopicClient.class); - doReturn(client).when(factory).createTopicPoliciesSystemTopicClient(any()); - field.set(service, factory); - - SystemTopicClient.Reader reader = mock(SystemTopicClient.Reader.class); - // Throw an exception first, create successfully after retrying - doReturn(FutureUtil.failedFuture(new PulsarClientException("test"))) - .doReturn(CompletableFuture.completedFuture(reader)).when(client).newReaderAsync(); - - SystemTopicClient.Reader reader1 = service.createSystemTopicClientWithRetry(null).get(); - - assertEquals(reader1, reader); - } - @Test public void testGetTopicPoliciesWithRetry() throws Exception { Field initMapField = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("policyCacheInitMap"); @@ -384,4 +366,102 @@ public void testHandleNamespaceBeingDeleted() throws Exception { }); service.deleteTopicPoliciesAsync(TOPIC1).get(); } + + @Test + public void testGetTopicPoliciesWithCleanCache() throws Exception { + final String topic = "persistent://" + NAMESPACE1 + "/test" + UUID.randomUUID(); + pulsarClient.newProducer().topic(topic).create().close(); + + SystemTopicBasedTopicPoliciesService topicPoliciesService = + (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); + + ConcurrentHashMap spyPoliciesCache = spy(new ConcurrentHashMap()); + FieldUtils.writeDeclaredField(topicPoliciesService, "policiesCache", spyPoliciesCache, true); + + Awaitility.await().untilAsserted(() -> { + Assertions.assertThat(topicPoliciesService.getTopicPolicies(TopicName.get(topic))).isNull(); + }); + + admin.topicPolicies().setMaxConsumersPerSubscription(topic, 1); + Awaitility.await().untilAsserted(() -> { + Assertions.assertThat(pulsar.getTopicPoliciesService().getTopicPolicies(TopicName.get(topic))).isNotNull(); + }); + + Map>> readers = + (Map>>) + 
FieldUtils.readDeclaredField(topicPoliciesService, "readerCaches", true); + + Mockito.doAnswer(invocation -> { + Thread.sleep(1000); + return invocation.callRealMethod(); + }).when(spyPoliciesCache).get(Mockito.any()); + + CompletableFuture result = new CompletableFuture<>(); + Thread thread = new Thread(() -> { + TopicPolicies topicPolicies; + for (int i = 0; i < 10; i++) { + try { + topicPolicies = topicPoliciesService.getTopicPolicies(TopicName.get(topic)); + Assert.assertNotNull(topicPolicies); + Thread.sleep(500); + } catch (BrokerServiceException.TopicPoliciesCacheNotInitException e) { + log.warn("topic policies cache not init, retry..."); + } catch (Throwable e) { + log.error("ops: ", e); + result.completeExceptionally(e); + return; + } + } + result.complete(null); + }); + + Thread thread2 = new Thread(() -> { + for (int i = 0; i < 10; i++) { + CompletableFuture> readerCompletableFuture = + readers.get(TopicName.get(topic).getNamespaceObject()); + if (readerCompletableFuture != null) { + readerCompletableFuture.join().closeAsync().join(); + } + } + }); + + thread.start(); + thread2.start(); + + thread.join(); + thread2.join(); + + result.join(); + } + + @Test + public void testWriterCache() throws Exception { + admin.namespaces().createNamespace(NAMESPACE4); + for (int i = 1; i <= 5; i ++) { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + i; + admin.topics().createNonPartitionedTopic(topicName); + pulsarClient.newProducer(Schema.STRING).topic(topicName).create().close(); + } + @Cleanup("shutdown") + ExecutorService executorService = Executors.newFixedThreadPool(5); + for (int i = 1; i <= 5; i ++) { + int finalI = i; + executorService.execute(() -> { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + finalI; + try { + admin.topicPolicies().setMaxConsumers(topicName, 2); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + SystemTopicBasedTopicPoliciesService service = (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); + Assert.assertNotNull(service.getWriterCaches().synchronous().get(NamespaceName.get(NAMESPACE4))); + for (int i = 1; i <= 5; i ++) { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + i; + admin.topics().delete(topicName); + } + admin.namespaces().deleteNamespace(NAMESPACE4); + Assert.assertNull(service.getWriterCaches().synchronous().getIfPresent(NamespaceName.get(NAMESPACE4))); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java new file mode 100644 index 0000000000000..7790940c1327f --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import static org.testng.Assert.assertTrue; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import lombok.EqualsAndHashCode; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.policies.data.InactiveTopicDeleteMode; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class TopicGCTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @EqualsAndHashCode.Include + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setBrokerDeleteInactiveTopicsEnabled(true); + this.conf.setBrokerDeleteInactiveTopicsMode(InactiveTopicDeleteMode.delete_when_subscriptions_caught_up); + this.conf.setBrokerDeleteInactiveTopicsFrequencySeconds(10); + } + + @Test + public void testCreateConsumerAfterOnePartDeleted() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String partition0 = topic + "-partition-0"; + final String partition1 = topic + "-partition-1"; + final String subscription = "s1"; + admin.topics().createPartitionedTopic(topic, 2); + admin.topics().createSubscription(topic, subscription, MessageId.earliest); + + // create consumers and producers. + Producer producer0 = pulsarClient.newProducer(Schema.STRING).topic(partition0) + .enableBatching(false).create(); + Producer producer1 = pulsarClient.newProducer(Schema.STRING).topic(partition1) + .enableBatching(false).create(); + org.apache.pulsar.client.api.Consumer consumer1 = pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionName(subscription).isAckReceiptEnabled(true).subscribe(); + + // Consume all messages on one partition and do not consume any messages on the other one. + producer0.send("1"); + producer1.send("2"); + admin.topics().skipAllMessages(partition0, subscription); + + // Wait for topic GC. + // Partition 0 will be deleted about 20s later; leave 2 minutes to avoid flakiness. + producer0.close(); + consumer1.close(); + Awaitility.await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> { + CompletableFuture> tp1 = pulsar.getBrokerService().getTopic(partition0, false); + CompletableFuture> tp2 = pulsar.getBrokerService().getTopic(partition1, false); + assertTrue(tp1 == null || !tp1.get().isPresent()); + assertTrue(tp2 != null && tp2.get().isPresent()); + }); + + // Verify that a consumer subscribing to the partitioned topic can still be created successfully.
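+ // Although partition 0 has already been removed by the inactive-topic GC, subscribing by the partitioned + // topic name is expected to succeed and still receive the unconsumed message from partition 1.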
+ Consumer consumerAllPartition = pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionName(subscription).isAckReceiptEnabled(true).subscribe(); + Message msg = consumerAllPartition.receive(2, TimeUnit.SECONDS); + String receivedMsgValue = msg.getValue(); + log.info("received msg: {}", receivedMsgValue); + consumerAllPartition.acknowledge(msg); + + // cleanup. + consumerAllPartition.close(); + producer0.close(); + producer1.close(); + admin.topics().deletePartitionedTopic(topic); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java index 5480a2e7a704a..54fec3934ddbc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java @@ -20,10 +20,13 @@ import static org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.CURSOR_INTERNAL_PROPERTY_PREFIX; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import com.google.common.collect.Multimap; import java.io.ByteArrayOutputStream; import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; @@ -32,6 +35,7 @@ import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.pulsar.broker.BrokerTestUtil; @@ -40,6 +44,7 @@ import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; @@ -47,6 +52,7 @@ import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @Test(groups = "broker") @@ -305,4 +311,169 @@ public void testBucketDelayedIndexMetrics() throws Exception { assertTrue(namespaceMetric.isPresent()); assertEquals(6, namespaceMetric.get().value); } + + @Test + public void testDelete() throws Exception { + String topic = BrokerTestUtil.newUniqueName("persistent://public/default/testDelete"); + + @Cleanup + Consumer c1 = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + for (int i = 0; i < 1000; i++) { + producer.newMessage() + .value("msg") + .deliverAfter(1, TimeUnit.HOURS) + .send(); + } + + Dispatcher dispatcher = pulsar.getBrokerService().getTopicReference(topic).get().getSubscription("sub").getDispatcher(); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(dispatcher.getNumberOfDelayedMessages(), 1000)); + + Map cursorProperties = + ((PersistentDispatcherMultipleConsumers) 
dispatcher).getCursor().getCursorProperties(); + List bucketIds = cursorProperties.entrySet().stream() + .filter(x -> x.getKey().startsWith(CURSOR_INTERNAL_PROPERTY_PREFIX + "delayed.bucket")).map( + x -> Long.valueOf(x.getValue())).toList(); + + assertTrue(bucketIds.size() > 0); + + admin.topics().delete(topic, true); + + for (Long bucketId : bucketIds) { + try { + LedgerHandle ledgerHandle = + pulsarTestContext.getBookKeeperClient() + .openLedger(bucketId, BookKeeper.DigestType.CRC32C, new byte[]{}); + Assert.fail("Should fail"); + } catch (BKException.BKNoSuchLedgerExistsException e) { + // ignore it + } + } + } + + @DataProvider(name = "subscriptionTypes") + public Object[][] subscriptionTypes() { + return new Object[][]{ + {SubscriptionType.Shared}, + {SubscriptionType.Key_Shared}, + {SubscriptionType.Failover}, + {SubscriptionType.Exclusive}, + }; + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeleteTopicIfCursorPropsEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createNonPartitionedTopic(topic); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic, subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().delete(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeletePartitionedTopicIfCursorPropsEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createPartitionedTopic(topic, 2); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic + "-partition-0", subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().deletePartitionedTopic(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeleteTopicIfCursorPropsNotEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createNonPartitionedTopic(topic); + // create a subscription without props. 
+ admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic, subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Put a subscription prop. + Map properties = new HashMap<>(); + properties.put("ignore", "ignore"); + admin.topics().updateSubscriptionProperties(topic, subscriptionName, properties); + assertTrue(cursor.getCursorProperties() != null && !cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().delete(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeletePartitionedTopicIfCursorPropsNotEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createPartitionedTopic(topic, 2); + pulsarClient.newProducer().topic(topic).create().close(); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + + ManagedCursorImpl cursor = findCursor(topic + "-partition-0", subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Put a subscription prop. + Map properties = new HashMap<>(); + properties.put("ignore", "ignore"); + admin.topics().updateSubscriptionProperties(topic, subscriptionName, properties); + assertTrue(cursor.getCursorProperties() != null && !cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. 
+ admin.topics().deletePartitionedTopic(topic); + } + + + private ManagedCursorImpl findCursor(String topic, String subscriptionName) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + Iterator cursorIterator = persistentTopic.getManagedLedger().getCursors().iterator(); + while (cursorIterator.hasNext()) { + ManagedCursor managedCursor = cursorIterator.next(); + if (managedCursor == null || !managedCursor.getName().equals(subscriptionName)) { + continue; + } + return (ManagedCursorImpl) managedCursor; + } + return null; + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java index b4152b143ab6c..72eb3e8101fde 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java @@ -32,10 +32,12 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; import java.lang.reflect.Field; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedger; @@ -46,15 +48,22 @@ import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.service.BacklogQuotaManager; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @Slf4j @Test(groups = "broker") -public class MessageDuplicationTest { +public class MessageDuplicationTest extends BrokerTestBase { private static final int BROKER_DEDUPLICATION_ENTRIES_INTERVAL = 10; private static final int BROKER_DEDUPLICATION_MAX_NUMBER_PRODUCERS = 10; @@ -174,7 +183,7 @@ public void testInactiveProducerRemove() throws Exception { Field field = MessageDeduplication.class.getDeclaredField("inactiveProducers"); field.setAccessible(true); - Map inactiveProducers = (Map) field.get(messageDeduplication); + Map inactiveProducers = (ConcurrentHashMap) field.get(messageDeduplication); String producerName1 = "test1"; when(publishContext.getHighestSequenceId()).thenReturn(2L); @@ -437,4 +446,43 @@ public void completed(Exception e, long ledgerId, long entryId) { } }); } + + @BeforeMethod(alwaysRun = true) + @Override + protected void setup() throws Exception { + this.conf.setBrokerDeduplicationEnabled(true); + super.baseSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testMessageDeduplication() throws Exception { + String topicName = 
"persistent://prop/ns-abc/testMessageDeduplication"; + String producerName = "test-producer"; + Producer producer = pulsarClient + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(topicName) + .create(); + final PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().orElse(null); + assertNotNull(persistentTopic); + final MessageDeduplication messageDeduplication = persistentTopic.getMessageDeduplication(); + assertFalse(messageDeduplication.getInactiveProducers().containsKey(producerName)); + producer.close(); + Awaitility.await().untilAsserted(() -> assertTrue(messageDeduplication.getInactiveProducers().containsKey(producerName))); + admin.topicPolicies().setDeduplicationStatus(topicName, false); + Awaitility.await().untilAsserted(() -> { + final Boolean deduplicationStatus = admin.topicPolicies().getDeduplicationStatus(topicName); + Assert.assertNotNull(deduplicationStatus); + Assert.assertFalse(deduplicationStatus); + }); + messageDeduplication.purgeInactiveProducers(); + assertTrue(messageDeduplication.getInactiveProducers().isEmpty()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java index 48a4bfc923608..7e1b5f8c71e6d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java @@ -250,7 +250,7 @@ public void testSkipRedeliverTemporally() { redeliverEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key1"))); final List readEntries = new ArrayList<>(); readEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1"))); - readEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key2"))); + readEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key22"))); try { Field totalAvailablePermitsField = PersistentDispatcherMultipleConsumers.class.getDeclaredField("totalAvailablePermits"); @@ -346,7 +346,7 @@ public void testMessageRedelivery() throws Exception { // Messages with key1 are routed to consumer1 and messages with key2 are routed to consumer2 final List allEntries = new ArrayList<>(); - allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key2"))); + allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key22"))); allEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1"))); allEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key1"))); allEntries.forEach(entry -> ((EntryImpl) entry).retain()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java index 401f52daa6291..87408598889e7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java @@ -92,7 +92,12 @@ public class PersistentSubscriptionTest { public void setup() throws Exception { pulsarTestContext = PulsarTestContext.builderForNonStartableContext() .spyByDefault() - 
.configCustomizer(config -> config.setTransactionCoordinatorEnabled(true)) + .configCustomizer(config -> { + config.setTransactionCoordinatorEnabled(true); + config.setTransactionPendingAckStoreProviderClassName( + CustomTransactionPendingAckStoreProvider.class.getName()); + config.setTransactionBufferProviderClassName(InMemTransactionBufferProvider.class.getName()); + }) .useTestPulsarResources() .build(); @@ -100,56 +105,6 @@ public void setup() throws Exception { doReturn(Optional.of(new Policies())).when(namespaceResources) .getPoliciesIfCached(any()); - doReturn(new InMemTransactionBufferProvider()).when(pulsarTestContext.getPulsarService()) - .getTransactionBufferProvider(); - doReturn(new TransactionPendingAckStoreProvider() { - @Override - public CompletableFuture newPendingAckStore(PersistentSubscription subscription) { - return CompletableFuture.completedFuture(new PendingAckStore() { - @Override - public void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService executorService) { - try { - Field field = PendingAckHandleState.class.getDeclaredField("state"); - field.setAccessible(true); - field.set(pendingAckHandle, PendingAckHandleState.State.Ready); - } catch (NoSuchFieldException | IllegalAccessException e) { - fail(); - } - } - - @Override - public CompletableFuture closeAsync() { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture appendIndividualAck(TxnID txnID, List> positions) { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture appendCumulativeAck(TxnID txnID, PositionImpl position) { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture appendCommitMark(TxnID txnID, AckType ackType) { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture appendAbortMark(TxnID txnID, AckType ackType) { - return CompletableFuture.completedFuture(null); - } - }); - } - - @Override - public CompletableFuture checkInitializedBefore(PersistentSubscription subscription) { - return CompletableFuture.completedFuture(true); - } - }).when(pulsarTestContext.getPulsarService()).getTransactionPendingAckStoreProvider(); - ledgerMock = mock(ManagedLedgerImpl.class); cursorMock = mock(ManagedCursorImpl.class); managedLedgerConfigMock = mock(ManagedLedgerConfig.class); @@ -279,4 +234,53 @@ public void testAcknowledgeUpdateCursorLastActive() throws Exception { // `acknowledgeMessage` should update cursor last active assertTrue(persistentSubscription.cursor.getLastActive() > beforeAcknowledgeTimestamp); } + + public static class CustomTransactionPendingAckStoreProvider implements TransactionPendingAckStoreProvider { + @Override + public CompletableFuture newPendingAckStore(PersistentSubscription subscription) { + return CompletableFuture.completedFuture(new PendingAckStore() { + @Override + public void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService executorService) { + try { + Field field = PendingAckHandleState.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(pendingAckHandle, PendingAckHandleState.State.Ready); + } catch (NoSuchFieldException | IllegalAccessException e) { + fail(); + } + } + + @Override + public CompletableFuture closeAsync() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture appendIndividualAck(TxnID txnID, + List> positions) { + return CompletableFuture.completedFuture(null); + } + + @Override + public 
CompletableFuture appendCumulativeAck(TxnID txnID, PositionImpl position) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture appendCommitMark(TxnID txnID, AckType ackType) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture appendAbortMark(TxnID txnID, AckType ackType) { + return CompletableFuture.completedFuture(null); + } + }); + } + + @Override + public CompletableFuture checkInitializedBefore(PersistentSubscription subscription) { + return CompletableFuture.completedFuture(true); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java index 412b8207e34c7..4b4aa5b45d31a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java @@ -45,6 +45,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -60,6 +61,7 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.broker.service.TopicPoliciesService; import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -77,7 +79,9 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicStats; import org.awaitility.Awaitility; import org.junit.Assert; @@ -291,7 +295,8 @@ public void testPersistentPartitionedTopicUnload() throws Exception { assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName)); pulsar.getBrokerService().getTopicIfExists(topicName).get(); - assertTrue(pulsar.getBrokerService().getTopics().containsKey(topicName)); + // The map topics should only contain partitions, does not contain partitioned topic. 
+ assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName)); // ref of partitioned-topic name should be empty assertFalse(pulsar.getBrokerService().getTopicReference(topicName).isPresent()); @@ -453,8 +458,7 @@ public void testCreateNonExistentPartitions() throws PulsarAdminException, Pulsa .topic(partition.toString()) .create(); fail("unexpected behaviour"); - } catch (PulsarClientException.TopicDoesNotExistException ignored) { - + } catch (PulsarClientException.NotAllowedException ex) { } Assert.assertEquals(admin.topics().getPartitionedTopicMetadata(topicName).partitions, 4); } @@ -603,4 +607,58 @@ public void testCreateTopicWithZombieReplicatorCursor(boolean topicLevelPolicy) return !topic.getManagedLedger().getCursors().iterator().hasNext(); }); } + + @Test + public void testCheckPersistencePolicies() throws Exception { + final String myNamespace = "prop/ns"; + admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); + final String topic = "persistent://" + myNamespace + "/testConfig" + UUID.randomUUID(); + conf.setForceDeleteNamespaceAllowed(true); + pulsarClient.newProducer().topic(topic).create().close(); + RetentionPolicies retentionPolicies = new RetentionPolicies(1, 1); + PersistentTopic persistentTopic = spy((PersistentTopic) pulsar.getBrokerService().getTopicIfExists(topic).get().get()); + TopicPoliciesService policiesService = spy(pulsar.getTopicPoliciesService()); + doReturn(policiesService).when(pulsar).getTopicPoliciesService(); + TopicPolicies policies = new TopicPolicies(); + policies.setRetentionPolicies(retentionPolicies); + doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(policiesService).getTopicPoliciesAsync(TopicName.get(topic)); + persistentTopic.onUpdate(policies); + verify(persistentTopic, times(1)).checkPersistencePolicies(); + Awaitility.await().untilAsserted(() -> { + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); + }); + // throw exception + doReturn(CompletableFuture.failedFuture(new RuntimeException())).when(persistentTopic).checkPersistencePolicies(); + policies.setRetentionPolicies(new RetentionPolicies(2, 2)); + persistentTopic.onUpdate(policies); + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); + } + + @Test + public void testDynamicConfigurationAutoSkipNonRecoverableData() throws Exception { + pulsar.getConfiguration().setAutoSkipNonRecoverableData(false); + final String topicName = "persistent://prop/ns-abc/testAutoSkipNonRecoverableData"; + final String subName = "test_sub"; + + Consumer subscribe = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName).subscribe(); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + PersistentSubscription subscription = persistentTopic.getSubscription(subName); + + assertFalse(persistentTopic.ledger.getConfig().isAutoSkipNonRecoverableData()); + assertFalse(subscription.getExpiryMonitor().isAutoSkipNonRecoverableData()); + + String key = "autoSkipNonRecoverableData"; + admin.brokers().updateDynamicConfiguration(key, "true"); + Awaitility.await() + .untilAsserted(() -> assertEquals(admin.brokers().getAllDynamicConfigurations().get(key), "true")); + + 
assertTrue(persistentTopic.ledger.getConfig().isAutoSkipNonRecoverableData()); + assertTrue(subscription.getExpiryMonitor().isAutoSkipNonRecoverableData()); + + subscribe.close(); + admin.topics().delete(topicName); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java index e57092d02dd5d..16721ca1203fd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java @@ -492,6 +492,43 @@ public void testDisableNamespacePolicyTakeSnapshot() throws Exception { } + @Test(timeOut = 30000) + public void testDisableNamespacePolicyTakeSnapshotShouldNotThrowException() throws Exception { + cleanup(); + conf.setBrokerDeduplicationEnabled(true); + conf.setBrokerDeduplicationSnapshotFrequencyInSeconds(1); + conf.setBrokerDeduplicationSnapshotIntervalSeconds(1); + conf.setBrokerDeduplicationEntriesInterval(20000); + setup(); + + final String topicName = testTopic + UUID.randomUUID().toString(); + final String producerName = "my-producer"; + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.STRING).topic(topicName).enableBatching(false).producerName(producerName).create(); + + // disable deduplication at the namespace level + admin.namespaces().setDeduplicationStatus(myNamespace, false); + + int msgNum = 50; + CountDownLatch countDownLatch = new CountDownLatch(msgNum); + for (int i = 0; i < msgNum; i++) { + producer.newMessage().value("msg" + i).sendAsync().whenComplete((res, e) -> countDownLatch.countDown()); + } + countDownLatch.await(); + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().get(); + ManagedCursor managedCursor = persistentTopic.getMessageDeduplication().getManagedCursor(); + + // When deduplication is disabled for the topic, the deduplication cursor should be deleted. + assertNull(managedCursor); + + // This method is invoked for every topic via brokerService.forEachTopic(). + // When deduplication is disabled at the topic level, + // it should be skipped without throwing an exception.
+ persistentTopic.checkDeduplicationSnapshot(); + } + private void waitCacheInit(String topicName) throws Exception { pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub").subscribe().close(); TopicName topic = TopicName.get(topicName); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java index 4b9d91fbde219..1c4f88bc0273c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java @@ -22,6 +22,7 @@ import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.apache.pulsar.client.api.SubscriptionInitialPosition.Earliest; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -30,8 +31,9 @@ import static org.testng.Assert.assertTrue; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; - +import io.netty.buffer.ByteBuf; import java.lang.reflect.Field; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -39,7 +41,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; @@ -51,17 +52,22 @@ import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Dispatcher; import org.apache.pulsar.broker.service.EntryFilterSupport; +import org.apache.pulsar.broker.service.Subscription; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.RawMessage; +import org.apache.pulsar.client.api.RawReader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.stats.AnalyzeSubscriptionBacklogResult; +import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -149,6 +155,58 @@ public void testOverride() throws Exception { consumer.close(); } + @Test + public void testEntryFilterWithCompactor() throws Exception { + conf.setAllowOverrideEntryFilters(true); + String topic = "persistent://prop/ns-abc/topic" + UUID.randomUUID(); + + List messages = new ArrayList<>(); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topic).create(); + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K2").value("V2").send(); + producer.newMessage().key("K3").value("V3").send(); + producer.newMessage().key("K4").value("V4").send(); + messages.add("V2"); + messages.add("V4"); + + PersistentTopic topicRef = (PersistentTopic) 
pulsar.getBrokerService().getTopicReference(topic).get(); + + // set topic level entry filters + EntryFilter mockFilter = mock(EntryFilter.class); + doAnswer(invocationOnMock -> { + FilterContext filterContext = invocationOnMock.getArgument(1); + String partitionKey = filterContext.getMsgMetadata().getPartitionKey(); + if (partitionKey.equals("K1") || partitionKey.equals("K3")) { + return EntryFilter.FilterResult.REJECT; + } else { + return EntryFilter.FilterResult.ACCEPT; + } + }).when(mockFilter).filterEntry(any(Entry.class), any(FilterContext.class)); + setMockFilterToTopic(topicRef, List.of(mockFilter)); + + List results = new ArrayList<>(); + RawReader rawReader = RawReader.create(pulsarClient, topic, Compactor.COMPACTION_SUBSCRIPTION).get(); + while (true) { + boolean hasMsg = rawReader.hasMessageAvailableAsync().get(); + if (hasMsg) { + try (RawMessage m = rawReader.readNextAsync().get()) { + ByteBuf headersAndPayload = m.getHeadersAndPayload(); + Commands.skipMessageMetadata(headersAndPayload); + byte[] bytes = new byte[headersAndPayload.readableBytes()]; + headersAndPayload.readBytes(bytes); + + results.add(new String(bytes)); + } + } else { + break; + } + } + rawReader.closeAsync().get(); + + Assert.assertEquals(messages, results); + } + @SneakyThrows private void setMockFilterToTopic(PersistentTopic topicRef, List mockFilter) { FieldUtils.writeField(topicRef, "entryFilters", Pair.of(null, mockFilter), true); @@ -286,10 +344,16 @@ public void testFilter() throws Exception { } + @DataProvider(name = "topicProvider") + public Object[][] topicProvider() { + return new Object[][]{ + {"persistent://prop/ns-abc/topic" + UUID.randomUUID()}, + {"non-persistent://prop/ns-abc/topic" + UUID.randomUUID()}, + }; + } - @Test - public void testFilteredMsgCount() throws Throwable { - String topic = "persistent://prop/ns-abc/topic" + UUID.randomUUID(); + @Test(dataProvider = "topicProvider") + public void testFilteredMsgCount(String topic) throws Throwable { String subName = "sub"; try (Producer producer = pulsarClient.newProducer(Schema.STRING) @@ -298,7 +362,7 @@ public void testFilteredMsgCount() throws Throwable { .subscriptionName(subName).subscribe()) { // mock entry filters - PersistentSubscription subscription = (PersistentSubscription) pulsar.getBrokerService() + Subscription subscription = pulsar.getBrokerService() .getTopicReference(topic).get().getSubscription(subName); Dispatcher dispatcher = subscription.getDispatcher(); Field field = EntryFilterSupport.class.getDeclaredField("entryFilters"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java index c7e30d5c3fc37..fbf734f331f2b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service.schema; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.fail; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertFalse; @@ -40,16 +41,23 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import 
org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.schema.KeyValueSchemaInfo; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; +import org.apache.pulsar.common.protocol.schema.IsCompatibilityResponse; import org.apache.pulsar.common.protocol.schema.SchemaData; import org.apache.pulsar.common.protocol.schema.SchemaVersion; +import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.common.schema.LongSchemaVersion; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.apache.pulsar.common.schema.SchemaInfoWithVersion; import org.apache.pulsar.common.schema.SchemaType; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -93,6 +101,7 @@ protected void setup() throws Exception { Map checkMap = new HashMap<>(); checkMap.put(SchemaType.AVRO, new AvroSchemaCompatibilityCheck()); schemaRegistryService = new SchemaRegistryServiceImpl(storage, checkMap, MockClock, null); + setupDefaultTenantAndNamespace(); } @AfterMethod(alwaysRun = true) @@ -385,4 +394,32 @@ private static SchemaData getSchemaData(String schemaJson) { private SchemaVersion version(long version) { return new LongSchemaVersion(version); } + + @Test + public void testKeyValueSchema() throws Exception { + final String topicName = "persistent://public/default/testKeyValueSchema"; + admin.topics().createNonPartitionedTopic(BrokerTestUtil.newUniqueName(topicName)); + + final SchemaInfo schemaInfo = KeyValueSchemaInfo.encodeKeyValueSchemaInfo( + "keyValue", + SchemaInfo.builder().type(SchemaType.STRING).schema(new byte[0]) + .build(), + SchemaInfo.builder().type(SchemaType.BOOLEAN).schema(new byte[0]) + .build(), KeyValueEncodingType.SEPARATED); + assertThrows(PulsarAdminException.ServerSideErrorException.class, () -> admin.schemas().testCompatibility(topicName, schemaInfo)); + admin.schemas().createSchema(topicName, schemaInfo); + + final IsCompatibilityResponse isCompatibilityResponse = admin.schemas().testCompatibility(topicName, schemaInfo); + Assert.assertTrue(isCompatibilityResponse.isCompatibility()); + + final SchemaInfoWithVersion schemaInfoWithVersion = admin.schemas().getSchemaInfoWithVersion(topicName); + Assert.assertEquals(schemaInfoWithVersion.getVersion(), 0); + + final Long version1 = admin.schemas().getVersionBySchema(topicName, schemaInfo); + Assert.assertEquals(version1, 0); + + final Long version2 = admin.schemas().getVersionBySchema(topicName, schemaInfoWithVersion.getSchemaInfo()); + Assert.assertEquals(version2, 0); + + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java new file mode 100644 index 0000000000000..66bfd1c3ec2b0 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service.schema; + +import static org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; +import static org.testng.Assert.assertTrue; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.TopicDomain; +import org.apache.pulsar.common.naming.TopicName; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class TopicSchemaTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @DataProvider(name = "topicDomains") + public Object[][] topicDomains() { + return new Object[][]{ + {TopicDomain.non_persistent}, + {TopicDomain.persistent} + }; + } + + @Test(dataProvider = "topicDomains") + public void testDeleteNonPartitionedTopicWithSchema(TopicDomain topicDomain) throws Exception { + final String topic = BrokerTestUtil.newUniqueName(topicDomain.value() + "://public/default/tp"); + final String schemaId = TopicName.get(TopicName.get(topic).getPartitionedTopicName()).getSchemaName(); + admin.topics().createNonPartitionedTopic(topic); + + // Add schema. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic) + .enableBatching(false).create(); + producer.close(); + List schemaList1 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList1 != null && schemaList1.size() > 0); + + // Verify the schema has been deleted with topic. + admin.topics().delete(topic, false); + List schemaList2 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList2 == null || schemaList2.isEmpty()); + } + + @Test + public void testDeletePartitionedTopicWithoutSchema() throws Exception { + // Non-persistent topic does not support partitioned topic now, so only write a test case for persistent topic. 
+ TopicDomain topicDomain = TopicDomain.persistent; + final String topic = BrokerTestUtil.newUniqueName(topicDomain.value() + "://public/default/tp"); + final String partition0 = topic + "-partition-0"; + final String partition1 = topic + "-partition-1"; + final String schemaId = TopicName.get(TopicName.get(topic).getPartitionedTopicName()).getSchemaName(); + admin.topics().createPartitionedTopic(topic, 2); + + // Add schema. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic) + .enableBatching(false).create(); + producer.close(); + List schemaList1 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList1 != null && schemaList1.size() > 0); + + // Verify the schema will not be deleted when partition-0 is deleted. + admin.topics().delete(partition0, false); + List schemaList2 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList2 != null && schemaList2.size() > 0); + + // Verify the schema will not be deleted even after both partition-0 and partition-1 are deleted. + admin.topics().delete(partition1, false); + List schemaList3 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList3 != null && schemaList3.size() > 0); + + // Verify the schema will be deleted along with the partitioned topic metadata. + admin.topics().deletePartitionedTopic(topic, false); + List schemaList4 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList4 == null || schemaList4.isEmpty()); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java index bf0dd3aa9c1c5..c8fce32efc5f0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java @@ -27,6 +27,8 @@ import org.apache.pulsar.common.api.proto.CommandEndTxnResponse; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespaceResponse; import org.apache.pulsar.common.api.proto.CommandPartitionedTopicMetadataResponse; +import org.apache.pulsar.common.api.proto.CommandPing; +import org.apache.pulsar.common.api.proto.CommandPong; import org.apache.pulsar.common.api.proto.CommandWatchTopicListSuccess; import org.apache.pulsar.common.protocol.PulsarDecoder; import org.apache.pulsar.common.api.proto.CommandAck; @@ -207,6 +209,16 @@ protected void handleEndTxnOnSubscriptionResponse( CommandEndTxnOnSubscriptionResponse commandEndTxnOnSubscriptionResponse) { queue.offer(new CommandEndTxnOnSubscriptionResponse().copyFrom(commandEndTxnOnSubscriptionResponse)); } + + @Override + protected void handlePing(CommandPing ping) { + queue.offer(new CommandPing().copyFrom(ping)); + } + + @Override + protected void handlePong(CommandPong pong) { + return; + } }; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java index bbeee9f5a497a..f29c643a8f50b 100644 ---
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java @@ -446,4 +446,37 @@ public void testAvgMessagesPerEntry() throws Exception { int avgMessagesPerEntry = consumerStats.getAvgMessagesPerEntry(); assertEquals(3, avgMessagesPerEntry); } + + @Test() + public void testNonPersistentTopicSharedSubscriptionUnackedMessages() throws Exception { + final String topicName = "non-persistent://my-property/my-ns/my-topic" + UUID.randomUUID(); + final String subName = "my-sub"; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + for (int i = 0; i < 5; i++) { + producer.send(("message-" + i).getBytes()); + } + for (int i = 0; i < 5; i++) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledge(msg); + } + TimeUnit.SECONDS.sleep(1); + + TopicStats topicStats = admin.topics().getStats(topicName); + assertEquals(1, topicStats.getSubscriptions().size()); + List consumers = topicStats.getSubscriptions().get(subName).getConsumers(); + assertEquals(1, consumers.size()); + assertEquals(0, consumers.get(0).getUnackedMessages()); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java index f5ae8459f1803..a7a28afd8ac64 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java @@ -38,6 +38,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -80,6 +81,7 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.compaction.Compactor; +import org.apache.zookeeper.CreateMode; import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; @@ -772,6 +774,10 @@ public void testBundlesMetrics() throws Exception { c1.acknowledge(c1.receive()); } + // Mock another broker to make split task work. + String mockedBroker = "/loadbalance/brokers/127.0.0.1:0"; + mockZooKeeper.create(mockedBroker, new byte[]{0}, Collections.emptyList(), CreateMode.EPHEMERAL); + pulsar.getBrokerService().updateRates(); Awaitility.await().untilAsserted(() -> assertTrue(pulsar.getBrokerService().getBundleStats().size() > 0)); ModularLoadManagerWrapper loadManager = (ModularLoadManagerWrapper)pulsar.getLoadManager().get(); @@ -796,6 +802,9 @@ public void testBundlesMetrics() throws Exception { assertTrue(metrics.containsKey("pulsar_lb_bandwidth_out_usage")); assertTrue(metrics.containsKey("pulsar_lb_bundles_split_total")); + + // cleanup. 
+ mockZooKeeper.delete(mockedBroker, 0); } @Test @@ -1370,15 +1379,12 @@ public void testAuthMetrics() throws IOException, AuthenticationException { conf.setProperties(properties); provider.initialize(conf); - String authExceptionMessage = ""; - try { provider.authenticate(new AuthenticationDataSource() { }); fail("Should have failed"); } catch (AuthenticationException e) { // expected, no credential passed - authExceptionMessage = e.getMessage(); } String token = AuthTokenUtils.createToken(secretKey, "subject", Optional.empty()); @@ -1415,7 +1421,8 @@ public String getCommandData() { boolean haveFailed = false; for (Metric metric : cm) { if (Objects.equals(metric.tags.get("auth_method"), "token") - && Objects.equals(metric.tags.get("reason"), authExceptionMessage) + && Objects.equals(metric.tags.get("reason"), + AuthenticationProviderToken.ErrorCode.INVALID_AUTH_DATA.name()) && Objects.equals(metric.tags.get("provider_name"), provider.getClass().getSimpleName())) { haveFailed = true; } @@ -1946,4 +1953,30 @@ public String toString() { } } + @Test + public void testEscapeLabelValue() throws Exception { + String ns1 = "prop/ns-abc1"; + admin.namespaces().createNamespace(ns1); + String topic = "persistent://" + ns1 + "/\"mytopic"; + admin.topics().createNonPartitionedTopic(topic); + + @Cleanup + final Consumer consumer = pulsarClient.newConsumer() + .subscriptionName("sub") + .topic(topic) + .subscribe(); + @Cleanup + ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, true, false, + false, statsOut); + String metricsStr = statsOut.toString(); + final List subCountLines = metricsStr.lines() + .filter(line -> line.startsWith("pulsar_subscriptions_count")) + .collect(Collectors.toList()); + System.out.println(subCountLines); + assertEquals(subCountLines.size(), 1); + assertEquals(subCountLines.get(0), + "pulsar_subscriptions_count{cluster=\"test\",namespace=\"prop/ns-abc1\",topic=\"persistent://prop/ns-abc1/\\\"mytopic\"} 1"); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java index bf9c1d540bf87..d5e0066a86f15 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/SubscriptionStatsTest.java @@ -211,21 +211,22 @@ public void testSubscriptionStats(final String topic, final String subName, bool hasFilterField.set(dispatcher, true); } - for (int i = 0; i < 100; i++) { - producer.newMessage().property("ACCEPT", " ").value(UUID.randomUUID().toString()).send(); - } - for (int i = 0; i < 100; i++) { + int rejectedCount = 100; + int acceptCount = 100; + int scheduleCount = 100; + for (int i = 0; i < rejectedCount; i++) { producer.newMessage().property("REJECT", " ").value(UUID.randomUUID().toString()).send(); } - for (int i = 0; i < 100; i++) { + for (int i = 0; i < acceptCount; i++) { + producer.newMessage().property("ACCEPT", " ").value(UUID.randomUUID().toString()).send(); + } + for (int i = 0; i < scheduleCount; i++) { producer.newMessage().property("RESCHEDULE", " ").value(UUID.randomUUID().toString()).send(); } - for (;;) { - Message message = consumer.receive(10, TimeUnit.SECONDS); - if (message == null) { - break; - } + for (int i = 0; i < acceptCount; i++) { + Message message = consumer.receive(1, TimeUnit.SECONDS); + Assert.assertNotNull(message); consumer.acknowledge(message); } @@ 
-263,12 +264,12 @@ public void testSubscriptionStats(final String topic, final String subName, bool .mapToDouble(m-> m.value).sum(); if (setFilter) { - Assert.assertEquals(filterAccepted, 100); - if (isPersistent) { - Assert.assertEquals(filterRejected, 100); - // Only works on the test, if there are some markers, the filterProcessCount will be not equal with rejectedCount + rescheduledCount + acceptCount - Assert.assertEquals(throughFilter, filterAccepted + filterRejected + filterRescheduled, 0.01 * throughFilter); - } + Assert.assertEquals(filterAccepted, acceptCount); + Assert.assertEquals(filterRejected, rejectedCount); + // Only works on the test, if there are some markers, + // the filterProcessCount will be not equal with rejectedCount + rescheduledCount + acceptCount + Assert.assertEquals(throughFilter, + filterAccepted + filterRejected + filterRescheduled, 0.01 * throughFilter); } else { Assert.assertEquals(throughFilter, 0D); Assert.assertEquals(filterAccepted, 0D); @@ -282,19 +283,20 @@ public void testSubscriptionStats(final String topic, final String subName, bool Assert.assertEquals(rescheduledMetrics.size(), 0); } - testSubscriptionStatsAdminApi(topic, subName, setFilter); + testSubscriptionStatsAdminApi(topic, subName, setFilter, acceptCount, rejectedCount); } - private void testSubscriptionStatsAdminApi(String topic, String subName, boolean setFilter) throws Exception { + private void testSubscriptionStatsAdminApi(String topic, String subName, boolean setFilter, + int acceptCount, int rejectedCount) throws Exception { boolean persistent = TopicName.get(topic).isPersistent(); TopicStats topicStats = admin.topics().getStats(topic); SubscriptionStats stats = topicStats.getSubscriptions().get(subName); Assert.assertNotNull(stats); if (setFilter) { - Assert.assertEquals(stats.getFilterAcceptedMsgCount(), 100); + Assert.assertEquals(stats.getFilterAcceptedMsgCount(), acceptCount); if (persistent) { - Assert.assertEquals(stats.getFilterRejectedMsgCount(), 100); + Assert.assertEquals(stats.getFilterRejectedMsgCount(), rejectedCount); // Only works on the test, if there are some markers, the filterProcessCount will be not equal with rejectedCount + rescheduledCount + acceptCount Assert.assertEquals(stats.getFilterProcessedMsgCount(), stats.getFilterAcceptedMsgCount() + stats.getFilterRejectedMsgCount() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java index b5933f9ecf529..0e12d75f74fa0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java @@ -20,10 +20,12 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; - +import java.util.HashMap; +import org.apache.bookkeeper.mledger.util.StatsBuckets; +import org.apache.pulsar.common.policies.data.stats.TopicMetricBean; import org.testng.annotations.Test; -@Test(groups = "broker") +@Test(groups = {"broker"}) public class AggregatedNamespaceStatsTest { @Test @@ -157,4 +159,101 @@ public void testSimpleAggregation() { assertEquals(nsSubStats.unackedMessages, 2); } + + @Test + public void testReset() { + AggregatedNamespaceStats stats = new AggregatedNamespaceStats(); + stats.topicsCount = 8; + stats.subscriptionsCount = 3; + stats.producersCount = 1; + 
stats.consumersCount = 8; + stats.rateIn = 1.3; + stats.rateOut = 3.5; + stats.throughputIn = 3.2; + stats.throughputOut = 5.8; + stats.messageAckRate = 12; + stats.bytesInCounter = 1234; + stats.msgInCounter = 3889; + stats.bytesOutCounter = 89775; + stats.msgOutCounter = 28983; + stats.msgBacklog = 39; + stats.msgDelayed = 31; + + stats.ongoingTxnCount = 87; + stats.abortedTxnCount = 74; + stats.committedTxnCount = 34; + + stats.backlogQuotaLimit = 387; + stats.backlogQuotaLimitTime = 8771; + + stats.replicationStats = new HashMap<>(); + stats.replicationStats.put("r", new AggregatedReplicationStats()); + + stats.subscriptionStats = new HashMap<>(); + stats.subscriptionStats.put("r", new AggregatedSubscriptionStats()); + + stats.compactionRemovedEventCount = 124; + stats.compactionSucceedCount = 487; + stats.compactionFailedCount = 84857; + stats.compactionDurationTimeInMills = 2384; + stats.compactionReadThroughput = 355423; + stats.compactionWriteThroughput = 23299; + stats.compactionCompactedEntriesCount = 37522; + stats.compactionCompactedEntriesSize = 8475; + + stats.compactionLatencyBuckets = new StatsBuckets(5); + stats.compactionLatencyBuckets.addValue(3); + + stats.delayedMessageIndexSizeInBytes = 45223; + + stats.bucketDelayedIndexStats = new HashMap<>(); + stats.bucketDelayedIndexStats.put("t", new TopicMetricBean()); + + stats.reset(); + + assertEquals(stats.bytesOutCounter, 0); + assertEquals(stats.topicsCount, 0); + assertEquals(stats.subscriptionsCount, 0); + assertEquals(stats.producersCount, 0); + assertEquals(stats.consumersCount, 0); + assertEquals(stats.rateIn, 0); + assertEquals(stats.rateOut, 0); + assertEquals(stats.throughputIn, 0); + assertEquals(stats.throughputOut, 0); + assertEquals(stats.messageAckRate, 0); + assertEquals(stats.bytesInCounter, 0); + assertEquals(stats.msgInCounter, 0); + assertEquals(stats.bytesOutCounter, 0); + assertEquals(stats.msgOutCounter, 0); + + assertEquals(stats.managedLedgerStats.storageSize, 0); + + assertEquals(stats.msgBacklog, 0); + assertEquals(stats.msgDelayed, 0); + + assertEquals(stats.ongoingTxnCount, 0); + assertEquals(stats.abortedTxnCount, 0); + assertEquals(stats.committedTxnCount, 0); + + assertEquals(stats.backlogQuotaLimit, 0); + assertEquals(stats.backlogQuotaLimitTime, -1); + + assertEquals(stats.replicationStats.size(), 0); + assertEquals(stats.subscriptionStats.size(), 0); + + assertEquals(stats.compactionRemovedEventCount, 0); + assertEquals(stats.compactionSucceedCount, 0); + assertEquals(stats.compactionFailedCount, 0); + assertEquals(stats.compactionDurationTimeInMills, 0); + assertEquals(stats.compactionReadThroughput, 0); + assertEquals(stats.compactionWriteThroughput, 0); + assertEquals(stats.compactionCompactedEntriesCount, 0); + assertEquals(stats.compactionCompactedEntriesSize, 0); + + assertEquals(stats.compactionLatencyBuckets.getSum(), 0); + + assertEquals(stats.delayedMessageIndexSizeInBytes, 0); + assertEquals(stats.bucketDelayedIndexStats.size(), 0); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java index 008c2143a3566..a2401ebe19a06 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.systopic; +import static 
org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; import com.google.common.collect.Sets; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -37,6 +39,7 @@ import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.service.schema.SchemaRegistry; import org.apache.pulsar.client.admin.ListTopicsOptions; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; @@ -55,6 +58,7 @@ import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; @@ -77,6 +81,7 @@ protected void setup() throws Exception { conf.setDefaultNumPartitions(PARTITIONS); conf.setManagedLedgerMaxEntriesPerLedger(1); conf.setBrokerDeleteInactiveTopicsEnabled(false); + conf.setTransactionCoordinatorEnabled(true); super.baseSetup(); } @@ -160,7 +165,7 @@ public void testProduceAndConsumeUnderSystemNamespace() throws Exception { @Test public void testHealthCheckTopicNotOffload() throws Exception { - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() @@ -180,18 +185,25 @@ public void testHealthCheckTopicNotOffload() throws Exception { @Test public void testSystemNamespaceNotCreateChangeEventsTopic() throws Exception { admin.brokers().healthcheck(TopicVersion.V2); - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); Optional optionalTopic = pulsar.getBrokerService() .getTopic(topicName.getPartition(1).toString(), false).join(); Assert.assertTrue(optionalTopic.isEmpty()); + + TopicName heartbeatTopicName = TopicName.get("persistent", + namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + admin.topics().getRetention(heartbeatTopicName.toString()); + optionalTopic = pulsar.getBrokerService() + .getTopic(topicName.getPartition(1).toString(), false).join(); + Assert.assertTrue(optionalTopic.isEmpty()); } @Test public void testHeartbeatTopicNotAllowedToSendEvent() throws Exception { admin.brokers().healthcheck(TopicVersion.V2); - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); for (int partition = 0; partition < PARTITIONS; partition ++) { @@ -203,6 +215,40 @@ public void testHeartbeatTopicNotAllowedToSendEvent() throws Exception { }); } + @Test + public void testHeartbeatTopicBeDeleted() 
throws Exception { + admin.brokers().healthcheck(TopicVersion.V2); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), + pulsar.getConfig()); + TopicName heartbeatTopicName = TopicName.get("persistent", namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + + List topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 1); + Assert.assertEquals(topics.get(0), heartbeatTopicName.toString()); + + admin.topics().delete(heartbeatTopicName.toString(), true); + topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 0); + } + + @Test + public void testHeartbeatNamespaceNotCreateTransactionInternalTopic() throws Exception { + admin.brokers().healthcheck(TopicVersion.V2); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), + pulsar.getConfig()); + TopicName topicName = TopicName.get("persistent", + namespaceName, SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + Optional optionalTopic = pulsar.getBrokerService() + .getTopic(topicName.getPartition(1).toString(), false).join(); + Assert.assertTrue(optionalTopic.isEmpty()); + + List topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 1); + TopicName heartbeatTopicName = TopicName.get("persistent", + namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + Assert.assertEquals(topics.get(0), heartbeatTopicName.toString()); + } + @Test public void testSetBacklogCausedCreatingProducerFailure() throws Exception { final String ns = "prop/ns-test"; @@ -299,4 +345,25 @@ public void testSystemTopicNotCheckExceed() throws Exception { writer1.get().close(); writer2.get().close(); } + + @Test + public void testDeleteTopicSchemaAndPolicyWhenTopicIsNotLoaded() throws Exception { + final String ns = "prop/ns-test"; + admin.namespaces().createNamespace(ns, 2); + final String topicName = "persistent://prop/ns-test/testDeleteTopicSchemaAndPolicyWhenTopicIsNotLoaded"; + admin.topics().createNonPartitionedTopic(topicName); + pulsarClient.newProducer(Schema.STRING).topic(topicName).create().close(); + admin.topicPolicies().setMaxConsumers(topicName, 2); + Awaitility.await().untilAsserted(() -> assertEquals(admin.topicPolicies().getMaxConsumers(topicName), 2)); + CompletableFuture> topic = pulsar.getBrokerService().getTopic(topicName, false); + PersistentTopic persistentTopic = (PersistentTopic) topic.join().get(); + persistentTopic.close(); + admin.topics().delete(topicName); + TopicPolicies topicPolicies = pulsar.getTopicPoliciesService().getTopicPoliciesIfExists(TopicName.get(topicName)); + assertNull(topicPolicies); + String base = TopicName.get(topicName).getPartitionedTopicName(); + String id = TopicName.get(base).getSchemaName(); + CompletableFuture schema = pulsar.getSchemaRegistryService().getSchema(id); + assertNull(schema.join()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/AbstractTestPulsarService.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/AbstractTestPulsarService.java index a6861268b94d5..517d57d0042ae 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/AbstractTestPulsarService.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/AbstractTestPulsarService.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.testcontext; +import 
java.io.IOException; import org.apache.pulsar.broker.BookKeeperClientFactory; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; @@ -37,11 +38,7 @@ */ abstract class AbstractTestPulsarService extends PulsarService { protected final SpyConfig spyConfig; - protected final MetadataStoreExtended localMetadataStore; - protected final MetadataStoreExtended configurationMetadataStore; - protected final Compactor compactor; - protected final BrokerInterceptor brokerInterceptor; - protected final BookKeeperClientFactory bookKeeperClientFactory; + private boolean compactorExists; public AbstractTestPulsarService(SpyConfig spyConfig, ServiceConfiguration config, MetadataStoreExtended localMetadataStore, @@ -50,53 +47,59 @@ public AbstractTestPulsarService(SpyConfig spyConfig, ServiceConfiguration confi BookKeeperClientFactory bookKeeperClientFactory) { super(config); this.spyConfig = spyConfig; - this.localMetadataStore = - NonClosingProxyHandler.createNonClosingProxy(localMetadataStore, MetadataStoreExtended.class); - this.configurationMetadataStore = - NonClosingProxyHandler.createNonClosingProxy(configurationMetadataStore, MetadataStoreExtended.class); - this.compactor = compactor; - this.brokerInterceptor = brokerInterceptor; - this.bookKeeperClientFactory = bookKeeperClientFactory; + setLocalMetadataStore( + NonClosingProxyHandler.createNonClosingProxy(localMetadataStore, MetadataStoreExtended.class)); + setConfigurationMetadataStore( + NonClosingProxyHandler.createNonClosingProxy(configurationMetadataStore, MetadataStoreExtended.class)); + setCompactor(compactor); + setBrokerInterceptor(brokerInterceptor); + setBkClientFactory(bookKeeperClientFactory); } @Override public MetadataStore createConfigurationMetadataStore(PulsarMetadataEventSynchronizer synchronizer) throws MetadataStoreException { if (synchronizer != null) { - synchronizer.registerSyncListener(configurationMetadataStore::handleMetadataEvent); + synchronizer.registerSyncListener( + ((MetadataStoreExtended) getConfigurationMetadataStore())::handleMetadataEvent); } - return configurationMetadataStore; + return getConfigurationMetadataStore(); } @Override public MetadataStoreExtended createLocalMetadataStore(PulsarMetadataEventSynchronizer synchronizer) throws MetadataStoreException, PulsarServerException { if (synchronizer != null) { - synchronizer.registerSyncListener(localMetadataStore::handleMetadataEvent); + synchronizer.registerSyncListener( + getLocalMetadataStore()::handleMetadataEvent); } - return localMetadataStore; + return getLocalMetadataStore(); } @Override - public Compactor newCompactor() throws PulsarServerException { + protected void setCompactor(Compactor compactor) { if (compactor != null) { - return compactor; - } else { - return spyConfig.getCompactor().spy(super.newCompactor()); + compactorExists = true; } + super.setCompactor(compactor); } @Override - public BrokerInterceptor getBrokerInterceptor() { - if (brokerInterceptor != null) { - return brokerInterceptor; + public Compactor newCompactor() throws PulsarServerException { + if (compactorExists) { + return getCompactor(); } else { - return super.getBrokerInterceptor(); + return spyConfig.getCompactor().spy(super.newCompactor()); } } @Override public BookKeeperClientFactory newBookKeeperClientFactory() { - return bookKeeperClientFactory; + return getBkClientFactory(); + } + + @Override + protected BrokerInterceptor newBrokerInterceptor() throws IOException { + return getBrokerInterceptor() != null ? 
getBrokerInterceptor() : super.newBrokerInterceptor(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java index 4b7762a2acfdf..945a4c7ea2534 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java @@ -20,7 +20,9 @@ import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import io.netty.channel.EventLoopGroup; +import java.io.IOException; import java.util.Collections; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -37,10 +39,11 @@ import org.apache.pulsar.broker.resources.TopicResources; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.schema.DefaultSchemaRegistryService; -import org.apache.pulsar.broker.service.schema.SchemaRegistryService; import org.apache.pulsar.broker.storage.ManagedLedgerStorage; -import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.broker.transaction.buffer.TransactionBufferProvider; +import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.naming.TopicName; @@ -53,13 +56,6 @@ * for a "non-startable" PulsarService. Please see {@link PulsarTestContext} for more details. 
*/ class NonStartableTestPulsarService extends AbstractTestPulsarService { - private final PulsarResources pulsarResources; - private final ManagedLedgerStorage managedLedgerClientFactory; - private final BrokerService brokerService; - - private final SchemaRegistryService schemaRegistryService; - - private final PulsarClientImpl pulsarClient; private final NamespaceService namespaceService; @@ -73,22 +69,39 @@ public NonStartableTestPulsarService(SpyConfig spyConfig, ServiceConfiguration c Function brokerServiceCustomizer) { super(spyConfig, config, localMetadataStore, configurationMetadataStore, compactor, brokerInterceptor, bookKeeperClientFactory); - this.pulsarResources = pulsarResources; - this.managedLedgerClientFactory = managedLedgerClientFactory; + setPulsarResources(pulsarResources); + setManagedLedgerClientFactory(managedLedgerClientFactory); try { - this.brokerService = brokerServiceCustomizer.apply( - spyConfig.getBrokerService().spy(TestBrokerService.class, this, getIoEventLoopGroup())); + setBrokerService(brokerServiceCustomizer.apply( + spyConfig.getBrokerService().spy(TestBrokerService.class, this, getIoEventLoopGroup()))); } catch (Exception e) { throw new RuntimeException(e); } - this.schemaRegistryService = spyWithClassAndConstructorArgs(DefaultSchemaRegistryService.class); - this.pulsarClient = mock(PulsarClientImpl.class); + setSchemaRegistryService(spyWithClassAndConstructorArgs(DefaultSchemaRegistryService.class)); + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); + setClient(mockClient); this.namespaceService = mock(NamespaceService.class); try { startNamespaceService(); } catch (PulsarServerException e) { throw new RuntimeException(e); } + if (config.isTransactionCoordinatorEnabled()) { + try { + setTransactionBufferProvider(TransactionBufferProvider + .newProvider(config.getTransactionBufferProviderClassName())); + } catch (IOException e) { + throw new RuntimeException(e); + } + try { + setTransactionPendingAckStoreProvider(TransactionPendingAckStoreProvider + .newProvider(config.getTransactionPendingAckStoreProviderClassName())); + } catch (IOException e) { + throw new RuntimeException(e); + } + } } @Override @@ -101,63 +114,17 @@ public Supplier getNamespaceServiceProvider() throws PulsarSer return () -> namespaceService; } - @Override - public synchronized PulsarClient getClient() throws PulsarServerException { - return pulsarClient; - } - @Override public PulsarClientImpl createClientImpl(ClientConfigurationData clientConf) throws PulsarClientException { - return pulsarClient; - } - - @Override - public SchemaRegistryService getSchemaRegistryService() { - return schemaRegistryService; - } - - @Override - public PulsarResources getPulsarResources() { - return pulsarResources; - } - - public BrokerService getBrokerService() { - return brokerService; - } - - @Override - public MetadataStore getConfigurationMetadataStore() { - return configurationMetadataStore; - } - - @Override - public MetadataStoreExtended getLocalMetadataStore() { - return localMetadataStore; - } - - @Override - public ManagedLedgerStorage getManagedLedgerClientFactory() { - return managedLedgerClientFactory; - } - - @Override - protected PulsarResources newPulsarResources() { - return pulsarResources; - } - - @Override - protected ManagedLedgerStorage newManagedLedgerClientFactory() throws Exception { - return managedLedgerClientFactory; + try { + return 
(PulsarClientImpl) getClient(); + } catch (PulsarServerException e) { + throw new PulsarClientException(e); + } } - @Override protected BrokerService newBrokerService(PulsarService pulsar) throws Exception { - return brokerService; - } - - @Override - public BookKeeperClientFactory getBookKeeperClientFactory() { - return bookKeeperClientFactory; + return getBrokerService(); } static class TestBrokerService extends BrokerService { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java index e170425ffe443..018358c791fda 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java @@ -54,6 +54,7 @@ import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.storage.ManagedLedgerStorage; import org.apache.pulsar.common.util.GracefulExecutorServicesShutdown; +import org.apache.pulsar.common.util.PortManager; import org.apache.pulsar.compaction.Compactor; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreConfig; @@ -153,6 +154,8 @@ public class PulsarTestContext implements AutoCloseable { private final boolean startable; + private final boolean preallocatePorts; + public ManagedLedgerFactory getManagedLedgerFactory() { return managedLedgerClientFactory.getManagedLedgerFactory(); @@ -220,7 +223,9 @@ public static class Builder { protected SpyConfig.Builder spyConfigBuilder = SpyConfig.builder(SpyConfig.SpyType.NONE); protected Consumer pulsarServiceCustomizer; protected ServiceConfiguration svcConfig = initializeConfig(); - protected Consumer configOverrideCustomizer = this::defaultOverrideServiceConfiguration; + protected Consumer configOverrideCustomizer; + + protected boolean configOverrideCalled = false; protected Function brokerServiceCustomizer = Function.identity(); /** @@ -290,6 +295,10 @@ protected void defaultOverrideServiceConfiguration(ServiceConfiguration svcConfi if (svcConfig.getManagedLedgerCacheSizeMB() == unconfiguredDefaults.getManagedLedgerCacheSizeMB()) { svcConfig.setManagedLedgerCacheSizeMB(8); } + + if (svcConfig.getTopicLoadTimeoutSeconds() == unconfiguredDefaults.getTopicLoadTimeoutSeconds()) { + svcConfig.setTopicLoadTimeoutSeconds(10); + } } /** @@ -323,6 +332,9 @@ public Builder spyConfigCustomizer(Consumer spyConfigCustomiz */ public Builder configCustomizer(Consumer configCustomerizer) { configCustomerizer.accept(svcConfig); + if (config != null) { + configCustomerizer.accept(config); + } return this; } @@ -338,6 +350,7 @@ public Builder configCustomizer(Consumer configCustomerize */ public Builder configOverride(Consumer configOverrideCustomizer) { this.configOverrideCustomizer = configOverrideCustomizer; + this.configOverrideCalled = true; return this; } @@ -514,6 +527,12 @@ public final PulsarTestContext build() { if (super.config == null) { config(svcConfig); } + handlePreallocatePorts(super.config); + if (configOverrideCustomizer != null || !configOverrideCalled) { + // call defaultOverrideServiceConfiguration if configOverrideCustomizer + // isn't explicitly set to null with `.configOverride(null)` call + defaultOverrideServiceConfiguration(super.config); + } if (configOverrideCustomizer != null) { configOverrideCustomizer.accept(super.config); } @@ -536,6 +555,37 @@ public final PulsarTestContext build() { return 
super.build(); } + protected void handlePreallocatePorts(ServiceConfiguration config) { + if (super.preallocatePorts) { + config.getBrokerServicePort().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setBrokerServicePort(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getBrokerServicePortTls().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setBrokerServicePortTls(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getWebServicePort().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setWebServicePort(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getWebServicePortTls().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setWebServicePortTls(Optional.of(PortManager.nextLockedFreePort())); + } + }); + registerCloseable(() -> { + config.getBrokerServicePort().ifPresent(PortManager::releaseLockedPort); + config.getBrokerServicePortTls().ifPresent(PortManager::releaseLockedPort); + config.getWebServicePort().ifPresent(PortManager::releaseLockedPort); + config.getWebServicePortTls().ifPresent(PortManager::releaseLockedPort); + }); + } + } + private void initializeCommonPulsarServices(SpyConfig spyConfig) { if (super.bookKeeperClient == null && super.managedLedgerClientFactory == null) { if (super.executor == null) { @@ -695,7 +745,8 @@ protected void initializePulsarServices(SpyConfig spyConfig, Builder builder) { if (metadataStore == null) { metadataStore = builder.configurationMetadataStore; } - NamespaceResources nsr = spyConfigPulsarResources.spy(NamespaceResources.class, metadataStore, 30); + NamespaceResources nsr = spyConfigPulsarResources.spy(NamespaceResources.class, + builder.localMetadataStore, metadataStore, 30); TopicResources tsr = spyConfigPulsarResources.spy(TopicResources.class, metadataStore); pulsarResources( spyConfigPulsarResources.spy( diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/SegmentAbortedTxnProcessorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/SegmentAbortedTxnProcessorTest.java index c157d7cf8c527..0600833b1adb6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/SegmentAbortedTxnProcessorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/SegmentAbortedTxnProcessorTest.java @@ -18,12 +18,14 @@ */ package org.apache.pulsar.broker.transaction; +import static org.junit.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.lang.reflect.Field; import java.util.LinkedList; import java.util.NavigableMap; +import java.util.Optional; import java.util.Queue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -37,18 +39,27 @@ import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.SystemTopicTxnBufferSnapshotService.ReferenceCountedWriter; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; import org.apache.pulsar.broker.systopic.SystemTopicClient; import org.apache.pulsar.broker.transaction.buffer.AbortedTxnProcessor; +import org.apache.pulsar.broker.transaction.buffer.impl.SingleSnapshotAbortedTxnProcessorImpl; import org.apache.pulsar.broker.transaction.buffer.impl.SnapshotSegmentAbortedTxnProcessorImpl; import 
org.apache.pulsar.broker.transaction.buffer.metadata.v2.TransactionBufferSnapshotIndexes; import org.apache.pulsar.broker.transaction.buffer.metadata.v2.TransactionBufferSnapshotSegment; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.events.EventType; +import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TopicStats; import org.testcontainers.shaded.org.awaitility.Awaitility; +import org.testcontainers.shaded.org.awaitility.reflect.WhiteboxImpl; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; @@ -64,11 +75,13 @@ public class SegmentAbortedTxnProcessorTest extends TransactionTestBase { @Override @BeforeClass protected void setup() throws Exception { - setUpBase(1, 1, PROCESSOR_TOPIC, 0); + setUpBase(1, 1, null, 0); this.pulsarService = getPulsarServiceList().get(0); this.pulsarService.getConfig().setTransactionBufferSegmentedSnapshotEnabled(true); this.pulsarService.getConfig().setTransactionBufferSnapshotSegmentSize(8 + PROCESSOR_TOPIC.length() + SEGMENT_SIZE * 3); + admin.topics().createNonPartitionedTopic(PROCESSOR_TOPIC); + assertTrue(getSnapshotAbortedTxnProcessor(PROCESSOR_TOPIC) instanceof SnapshotSegmentAbortedTxnProcessorImpl); } @Override @@ -247,8 +260,8 @@ public void testClearSnapshotSegments() throws Exception { private void verifySnapshotSegmentsSize(String topic, int size) throws Exception { SystemTopicClient.Reader reader = pulsarService.getTransactionBufferSnapshotServiceFactory() - .getTxnBufferSnapshotSegmentService() - .createReader(TopicName.get(topic)).get(); + .getTxnBufferSnapshotSegmentService() + .createReader(TopicName.get(topic)).get(); int segmentCount = 0; while (reader.hasMoreEvents()) { Message message = reader.readNextAsync() @@ -286,4 +299,147 @@ private void doCompaction(TopicName topic) throws Exception { CompletableFuture compactionFuture = (CompletableFuture) field.get(snapshotTopic); org.awaitility.Awaitility.await().untilAsserted(() -> assertTrue(compactionFuture.isDone())); } + + /** + * This test verifies the compatibility of the transaction buffer segmented snapshot feature + * when enabled on an existing topic. + * It performs the following steps: + * 1. Creates a topic with segmented snapshot disabled. + * 2. Sends 10 messages without using transactions. + * 3. Sends 10 messages using transactions and aborts them. + * 4. Sends 10 messages without using transactions. + * 5. Verifies that only the non-transactional messages are received. + * 6. Enables the segmented snapshot feature and sets the snapshot segment size. + * 7. Unloads the topic. + * 8. Sends a new message using a transaction and aborts it. + * 9. Verifies that the topic has exactly one segment. + * 10. Re-subscribes the consumer and re-verifies that only the non-transactional messages are received. 
+ */ + @Test + public void testSnapshotProcessorUpgrade() throws Exception { + String NAMESPACE2 = TENANT + "/ns2"; + admin.namespaces().createNamespace(NAMESPACE2); + this.pulsarService = getPulsarServiceList().get(0); + this.pulsarService.getConfig().setTransactionBufferSegmentedSnapshotEnabled(false); + + // Create a topic, send 10 messages without using transactions, and send 10 messages using transactions. + // Abort these transactions and verify the data. + final String topicName = "persistent://" + NAMESPACE2 + "/testSnapshotProcessorUpgrade"; + Producer producer = pulsarClient.newProducer().topic(topicName).create(); + Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("test-sub").subscribe(); + + assertTrue(getSnapshotAbortedTxnProcessor(topicName) instanceof SingleSnapshotAbortedTxnProcessorImpl); + // Send 10 messages without using transactions + for (int i = 0; i < 10; i++) { + producer.send(("test-message-" + i).getBytes()); + } + + // Send 10 messages using transactions and abort them + for (int i = 0; i < 10; i++) { + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + producer.newMessage(txn).value(("test-txn-message-" + i).getBytes()).sendAsync(); + txn.abort().get(); + } + + // Send 10 messages without using transactions + for (int i = 10; i < 20; i++) { + producer.send(("test-message-" + i).getBytes()); + } + + // Verify the data + for (int i = 0; i < 20; i++) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals("test-message-" + i, new String(msg.getData())); + } + + // Enable segmented snapshot + this.pulsarService.getConfig().setTransactionBufferSegmentedSnapshotEnabled(true); + this.pulsarService.getConfig().setTransactionBufferSnapshotSegmentSize(8 + PROCESSOR_TOPIC.length() + + SEGMENT_SIZE * 3); + + // Unload the topic + admin.topics().unload(topicName); + assertTrue(getSnapshotAbortedTxnProcessor(topicName) instanceof SnapshotSegmentAbortedTxnProcessorImpl); + + // Sends a new message using a transaction and aborts it. + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + producer.newMessage(txn).value("test-message-new".getBytes()).send(); + txn.abort().get(); + + // Verifies that the topic has exactly one segment. + Awaitility.await().untilAsserted(() -> { + String segmentTopic = "persistent://" + NAMESPACE2 + "/" + + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT_SEGMENTS; + TopicStats topicStats = admin.topics().getStats(segmentTopic); + assertEquals(1, topicStats.getMsgInCounter()); + }); + + // Re-subscribes the consumer and re-verifies that only the non-transactional messages are received. + consumer.close(); + consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("test-sub").subscribe(); + for (int i = 0; i < 20; i++) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals("test-message-" + i, new String(msg.getData())); + } + } + + /** + * This test verifies that when the segmented snapshot feature is enabled, creating a new topic + * does not create a __transaction_buffer_snapshot topic in the same namespace. + * The test performs the following steps: + * 1. Enable the segmented snapshot feature. + * 2. Create a new namespace. + * 3. Create a new topic in the namespace. + * 4. Check that the __transaction_buffer_snapshot topic is not created in the same namespace. + * 5. Destroy the namespace after the test. 
+ */ + @Test + public void testSegmentedSnapshotWithoutCreatingOldSnapshotTopic() throws Exception { + // Enable the segmented snapshot feature + pulsarService = getPulsarServiceList().get(0); + pulsarService.getConfig().setTransactionBufferSegmentedSnapshotEnabled(true); + + // Create a new namespace + String namespaceName = "tnx/testSegmentedSnapshotWithoutCreatingOldSnapshotTopic"; + admin.namespaces().createNamespace(namespaceName); + + // Create a new topic in the namespace + String topicName = "persistent://" + namespaceName + "/newTopic"; + Producer producer = pulsarClient.newProducer().topic(topicName).create(); + producer.close(); + assertTrue(getSnapshotAbortedTxnProcessor(topicName) instanceof SnapshotSegmentAbortedTxnProcessorImpl); + // Check that the __transaction_buffer_snapshot topic is not created in the same namespace + String transactionBufferSnapshotTopic = "persistent://" + namespaceName + "/" + + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT; + try { + admin.topics().getStats(transactionBufferSnapshotTopic); + fail("The __transaction_buffer_snapshot topic should not exist"); + } catch (PulsarAdminException e) { + assertEquals(e.getStatusCode(), 404); + } + + // Destroy the namespace after the test + admin.namespaces().deleteNamespace(namespaceName, true); + } + + private AbortedTxnProcessor getSnapshotAbortedTxnProcessor(String topicName) { + PersistentTopic persistentTopic = getPersistentTopic(topicName); + return WhiteboxImpl.getInternalState(persistentTopic.getTransactionBuffer(), "snapshotAbortedTxnProcessor"); + } + + private PersistentTopic getPersistentTopic(String topicName) { + for (PulsarService pulsar : getPulsarServiceList()) { + CompletableFuture> future = + pulsar.getBrokerService().getTopic(topicName, false); + if (future == null) { + continue; + } + return (PersistentTopic) future.join().get(); + } + throw new NullPointerException("topic[" + topicName + "] not found"); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java index 78846fb75922a..9e262d1cb5617 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java @@ -19,9 +19,12 @@ package org.apache.pulsar.broker.transaction; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -32,15 +35,21 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.persistent.MessageRedeliveryController; +import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import 
org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.ChunkMessageIdImpl; import org.apache.pulsar.common.api.proto.MessageIdData; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.TxnAction; @@ -224,6 +233,62 @@ public void sortedTest() throws Exception { log.info("TransactionConsumeTest sortedTest finish."); } + @Test + public void testMessageRedelivery() throws Exception { + int transactionMessageCnt = 10; + String subName = "shared-test"; + + @Cleanup + Consumer sharedConsumer = pulsarClient.newConsumer() + .topic(CONSUME_TOPIC) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + Awaitility.await().until(sharedConsumer::isConnected); + + long mostSigBits = 2L; + long leastSigBits = 5L; + TxnID txnID = new TxnID(mostSigBits, leastSigBits); + + // produce batch message with txn and then abort + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic(CONSUME_TOPIC, false).get().get(); + + List sendMessageList = new ArrayList<>(); + List messageIdDataList = appendTransactionMessages(txnID, persistentTopic, transactionMessageCnt, sendMessageList); + + persistentTopic.endTxn(txnID, TxnAction.ABORT_VALUE, 0L).get(); + log.info("Abort txn."); + + // redeliver transaction messages to shared consumer + PersistentSubscription subRef = persistentTopic.getSubscription(subName); + PersistentDispatcherMultipleConsumers dispatcher = (PersistentDispatcherMultipleConsumers) subRef + .getDispatcher(); + Field redeliveryMessagesField = PersistentDispatcherMultipleConsumers.class + .getDeclaredField("redeliveryMessages"); + redeliveryMessagesField.setAccessible(true); + MessageRedeliveryController redeliveryMessages = new MessageRedeliveryController(true); + + final Field totalAvailablePermitsField = PersistentDispatcherMultipleConsumers.class + .getDeclaredField("totalAvailablePermits"); + totalAvailablePermitsField.setAccessible(true); + totalAvailablePermitsField.set(dispatcher, 1000); + + for (MessageIdData messageIdData : messageIdDataList) { + redeliveryMessages.add(messageIdData.getLedgerId(), messageIdData.getEntryId()); + } + + redeliveryMessagesField.set(dispatcher, redeliveryMessages); + dispatcher.readMoreEntries(); + + // shared consumer should not receive the redelivered aborted transaction messages + Message message = sharedConsumer.receive(5, TimeUnit.SECONDS); + Assert.assertNull(message); + + log.info("TransactionConsumeTest testMessageRedelivery finish."); + } + private void sendNormalMessages(Producer producer, int startMsgCnt, int messageCnt, List sendMessageList) throws PulsarClientException { @@ -308,4 +373,45 @@ public void completed(Exception e, long ledgerId, long entryId) { return positionList; } + @Test + public void testAckChunkMessage() throws Exception { + String producerName = "test-producer"; + String subName = "testAckChunkMessage"; + @Cleanup + PulsarClient pulsarClient1 = PulsarClient.builder().serviceUrl(pulsarServiceList.get(0).getBrokerServiceUrl()) + .enableTransaction(true).build(); + @Cleanup + Producer producer = pulsarClient1 + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(CONSUME_TOPIC) + .enableChunking(true) + .enableBatching(false) + .create(); 
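// A note on what the chunking assertions in this test rely on: with enableChunking(true) and
// enableBatching(false), any payload larger than the broker's maxMessageSize (5 MB by default;
// the test does not appear to override it, so that default is assumed here) is split into
// multiple chunks, and send() returns a ChunkMessageIdImpl rather than a plain MessageIdImpl.
// The roughly 6 MB payload built below therefore spans at least two chunks, which is why
// getFirstChunkMessageId() and getLastChunkMessageId() are expected to differ before the
// reassembled message is acknowledged inside a transaction.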
+ Consumer consumer = pulsarClient1 + .newConsumer(Schema.STRING) + .subscriptionType(SubscriptionType.Shared) + .topic(CONSUME_TOPIC) + .subscriptionName(subName) + .subscribe(); + + int messageSize = 6000; // payload size in KB + String message = "a".repeat(messageSize * 1000); + MessageId messageId = producer.newMessage().value(message).send(); + assertTrue(messageId instanceof ChunkMessageIdImpl); + assertNotEquals(((ChunkMessageIdImpl) messageId).getLastChunkMessageId(), + ((ChunkMessageIdImpl) messageId).getFirstChunkMessageId()); + + Transaction transaction = pulsarClient1.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + + Message msg = consumer.receive(); + consumer.acknowledgeAsync(msg.getMessageId(), transaction); + transaction.commit().get(); + + Assert.assertEquals(admin.topics().getStats(CONSUME_TOPIC).getSubscriptions().get(subName) + .getUnackedMessages(), 0); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java index cdbb1563280b4..ddd8cf0790321 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java @@ -26,6 +26,8 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import lombok.Cleanup; @@ -43,6 +45,7 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; @@ -51,10 +54,11 @@ import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException; import org.awaitility.Awaitility; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; /** @@ -70,7 +74,7 @@ public class TransactionProduceTest extends TransactionTestBase { private static final String ACK_COMMIT_TOPIC = NAMESPACE1 + "/ack-commit"; private static final String ACK_ABORT_TOPIC = NAMESPACE1 + "/ack-abort"; private static final int NUM_PARTITIONS = 16; - @BeforeMethod + @BeforeClass protected void setup() throws Exception { setUpBase(1, NUM_PARTITIONS, PRODUCE_COMMIT_TOPIC, TOPIC_PARTITION); admin.topics().createPartitionedTopic(PRODUCE_ABORT_TOPIC, TOPIC_PARTITION); @@ -78,7 +82,7 @@ protected void setup() throws Exception { admin.topics().createPartitionedTopic(ACK_ABORT_TOPIC, TOPIC_PARTITION); } - @AfterMethod(alwaysRun = true) + @AfterClass(alwaysRun = true) protected void cleanup() throws Exception { super.internalCleanup(); } @@ -369,5 +373,26 @@ private int getPendingAckCount(String topic, String subscriptionName) throws Exc return pendingAckCount; } - + @Test + public void 
testCommitFailure() throws Exception { + Transaction txn = pulsarClient.newTransaction().build().get(); + final String topic = NAMESPACE1 + "/test-commit-failure"; + @Cleanup + final Producer producer = pulsarClient.newProducer().topic(topic).create(); + producer.newMessage(txn).value(new byte[1024 * 1024 * 10]).sendAsync(); + try { + txn.commit().get(); + Assert.fail(); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof PulsarClientException.TransactionHasOperationFailedException); + Assert.assertEquals(txn.getState(), Transaction.State.ABORTED); + } + try { + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getTxnMeta(txn.getTxnID()) + .getNow(null); + Assert.fail(); + } catch (CompletionException e) { + Assert.assertTrue(e.getCause() instanceof CoordinatorException.TransactionNotFoundException); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java index c3533e70cf8be..c947ba27069f1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java @@ -106,6 +106,7 @@ import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStoreProvider; import org.apache.pulsar.broker.transaction.pendingack.impl.PendingAckHandleImpl; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; @@ -272,6 +273,27 @@ public void testCreateTransactionSystemTopic() throws Exception { } } + @Test + public void testCanDeleteNamespaceWhenEnableTxnSegmentedSnapshot() throws Exception { + // Enable the segmented snapshot feature + pulsarServiceList.get(0).getConfig().setTransactionBufferSegmentedSnapshotEnabled(true); + pulsarServiceList.get(0).getConfig().setForceDeleteNamespaceAllowed(true); + + // Create a new namespace + String namespaceName = TENANT + "/testSegmentedSnapshotWithoutCreatingOldSnapshotTopic"; + admin.namespaces().createNamespace(namespaceName); + + // Create a new topic in the namespace + String topicName = "persistent://" + namespaceName + "/newTopic"; + @Cleanup + Producer producer = pulsarClient.newProducer().topic(topicName).create(); + producer.close(); + + // Destroy the namespace after the test + admin.namespaces().deleteNamespace(namespaceName, true); + pulsarServiceList.get(0).getConfig().setTransactionBufferSegmentedSnapshotEnabled(false); + } + @Test public void brokerNotInitTxnManagedLedgerTopic() throws Exception { String subName = "test"; @@ -1757,4 +1779,117 @@ private void getTopic(String topicName) { }); } + @Test + public void testReadCommittedWithReadCompacted() throws Exception{ + final String namespace = "tnx/ns-prechecks"; + final String topic = "persistent://" + namespace + "/test_transaction_topic"; + admin.namespaces().createNamespace(namespace); + admin.topics().createNonPartitionedTopic(topic); + + admin.topicPolicies().setCompactionThreshold(topic, 100 * 1024 * 1024); + + @Cleanup + Consumer consumer = this.pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Exclusive) + .readCompacted(true) + .subscribe(); + + @Cleanup + Producer producer = 
this.pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.newMessage().key("K1").value("V1").send(); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(1, TimeUnit.MINUTES).build().get(); + producer.newMessage(txn).key("K2").value("V2").send(); + producer.newMessage(txn).key("K3").value("V3").send(); + + List messages = new ArrayList<>(); + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message.getValue()); + } + + Assert.assertEquals(messages, List.of("V1")); + + txn.commit(); + + messages.clear(); + + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message.getValue()); + } + + Assert.assertEquals(messages, List.of("V2", "V3")); + } + + + @Test + public void testReadCommittedWithCompaction() throws Exception{ + final String namespace = "tnx/ns-prechecks"; + final String topic = "persistent://" + namespace + "/test_transaction_topic" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace); + admin.topics().createNonPartitionedTopic(topic); + + admin.topicPolicies().setCompactionThreshold(topic, 100 * 1024 * 1024); + + @Cleanup + Producer producer = this.pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.newMessage().key("K1").value("V1").send(); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(1, TimeUnit.MINUTES).build().get(); + producer.newMessage(txn).key("K2").value("V2").send(); + producer.newMessage(txn).key("K3").value("V3").send(); + txn.commit().get(); + + producer.newMessage().key("K1").value("V4").send(); + + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(1, TimeUnit.MINUTES).build().get(); + producer.newMessage(txn2).key("K2").value("V5").send(); + producer.newMessage(txn2).key("K3").value("V6").send(); + txn2.commit().get(); + + admin.topics().triggerCompaction(topic); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topic).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = this.pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Exclusive) + .readCompacted(true) + .subscribe(); + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V4", "V5", "V6")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java index f45eda8d21fbe..3a83d2f95fea0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java @@ -114,10 +114,10 @@ protected void setUpBase(int numBroker,int numPartitionsOfTC, String topic, int new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); createTransactionCoordinatorAssign(numPartitionsOfTC); + admin.tenants().createTenant(TENANT, + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + 
admin.namespaces().createNamespace(NAMESPACE1); if (topic != null) { - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); if (numPartitions == 0) { admin.topics().createNonPartitionedTopic(topic); } else { @@ -156,10 +156,8 @@ protected void startBroker() throws Exception { conf.setBrokerShutdownTimeoutMs(0L); conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); conf.setBrokerServicePort(Optional.of(0)); - conf.setBrokerServicePortTls(Optional.of(0)); conf.setAdvertisedAddress("localhost"); conf.setWebServicePort(Optional.of(0)); - conf.setWebServicePortTls(Optional.of(0)); conf.setTransactionCoordinatorEnabled(true); conf.setBrokerDeduplicationEnabled(true); conf.setTransactionBufferSnapshotMaxTransactionCount(2); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java index aa98fc7d70106..30cb3e765cba2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java @@ -18,9 +18,11 @@ */ package org.apache.pulsar.broker.transaction.buffer; +import lombok.Cleanup; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.BrokerService; @@ -31,7 +33,14 @@ import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.TopicMessageId; import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.impl.TopicMessageIdImpl; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; @@ -46,6 +55,7 @@ import java.time.Duration; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.UUID; @@ -53,6 +63,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertTrue; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.fail; + public class TopicTransactionBufferTest extends TransactionTestBase { @@ -130,7 +147,7 @@ public void testCheckDeduplicationFailedWhenCreatePersistentTopic() throws Excep } }) .when(brokerService) - .newTopic(Mockito.eq(topic), Mockito.any(), Mockito.eq(brokerService), + .newTopic(Mockito.eq(topic), 
any(), Mockito.eq(brokerService), Mockito.eq(PersistentTopic.class)); brokerService.createPersistentTopic0(topic, true, new CompletableFuture<>(), Collections.emptyMap()); @@ -138,7 +155,7 @@ public void testCheckDeduplicationFailedWhenCreatePersistentTopic() throws Excep Awaitility.waitAtMost(1, TimeUnit.MINUTES).until(() -> reference.get() != null); PersistentTopic persistentTopic = reference.get(); TransactionBuffer buffer = persistentTopic.getTransactionBuffer(); - Assert.assertTrue(buffer instanceof TopicTransactionBuffer); + assertTrue(buffer instanceof TopicTransactionBuffer); TopicTransactionBuffer ttb = (TopicTransactionBuffer) buffer; TopicTransactionBufferState.State expectState = TopicTransactionBufferState.State.Close; Assert.assertEquals(ttb.getState(), expectState); @@ -163,7 +180,7 @@ public void testCloseTransactionBufferWhenTimeout() throws Exception { return persistentTopic; }) .when(brokerService) - .newTopic(Mockito.eq(topic), Mockito.any(), Mockito.eq(brokerService), + .newTopic(Mockito.eq(topic), any(), Mockito.eq(brokerService), Mockito.eq(PersistentTopic.class)); CompletableFuture> f = brokerService.getTopic(topic, true); @@ -172,11 +189,184 @@ public void testCloseTransactionBufferWhenTimeout() throws Exception { .pollInterval(Duration.ofSeconds(2)).until(() -> reference.get() != null); PersistentTopic persistentTopic = reference.get(); TransactionBuffer buffer = persistentTopic.getTransactionBuffer(); - Assert.assertTrue(buffer instanceof TopicTransactionBuffer); + assertTrue(buffer instanceof TopicTransactionBuffer); TopicTransactionBuffer ttb = (TopicTransactionBuffer) buffer; TopicTransactionBufferState.State expectState = TopicTransactionBufferState.State.Close; Assert.assertEquals(ttb.getState(), expectState); - Assert.assertTrue(f.isCompletedExceptionally()); + assertTrue(f.isCompletedExceptionally()); + } + + /** + * This test mainly test the following two point: + * 1. `getLastMessageIds` will get max read position. + * Send two message |1:0|1:1|; mock max read position as |1:0|; `getLastMessageIds` will get |1:0|. + * 2. `getLastMessageIds` will wait Transaction buffer recover completely. + * Mock `checkIfTBRecoverCompletely` return an exception, `getLastMessageIds` will fail too. + * Mock `checkIfTBRecoverCompletely` return null, `getLastMessageIds` will get correct result. + */ + @Test + public void testGetMaxPositionAfterTBReady() throws Exception { + // 1. Prepare test environment. + String topic = "persistent://" + NAMESPACE1 + "/testGetMaxReadyPositionAfterTBReady"; + // 1.1 Mock component. + TransactionBuffer transactionBuffer = Mockito.spy(TransactionBuffer.class); + when(transactionBuffer.checkIfTBRecoverCompletely(anyBoolean())) + // Handle producer will check transaction buffer recover completely. + .thenReturn(CompletableFuture.completedFuture(null)) + // If the Transaction buffer failed to recover, we can not get the correct last max read id. + .thenReturn(CompletableFuture.failedFuture(new Throwable("Mock fail"))) + // If the transaction buffer recover successfully, the max read position can be acquired successfully. 
+ .thenReturn(CompletableFuture.completedFuture(null)); + TransactionBufferProvider transactionBufferProvider = Mockito.spy(TransactionBufferProvider.class); + Mockito.doReturn(transactionBuffer).when(transactionBufferProvider).newTransactionBuffer(any()); + TransactionBufferProvider originalTBProvider = getPulsarServiceList().get(0).getTransactionBufferProvider(); + Mockito.doReturn(transactionBufferProvider).when(getPulsarServiceList().get(0)).getTransactionBufferProvider(); + // 2. Building producer and consumer. + admin.topics().createNonPartitionedTopic(topic); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub") + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + // 3. Send message and test the exception can be handled as expected. + MessageIdImpl messageId = (MessageIdImpl) producer.newMessage().send(); + producer.newMessage().send(); + Mockito.doReturn(new PositionImpl(messageId.getLedgerId(), messageId.getEntryId())) + .when(transactionBuffer).getMaxReadPosition(); + try { + consumer.getLastMessageIds(); + fail(); + } catch (PulsarClientException exception) { + assertTrue(exception.getMessage().contains("Failed to recover Transaction Buffer.")); + } + List messageIdList = consumer.getLastMessageIds(); + assertEquals(messageIdList.size(), 1); + TopicMessageIdImpl actualMessageID = (TopicMessageIdImpl) messageIdList.get(0); + assertEquals(messageId.getLedgerId(), actualMessageID.getLedgerId()); + assertEquals(messageId.getEntryId(), actualMessageID.getEntryId()); + // 4. Clean resource + Mockito.doReturn(originalTBProvider).when(getPulsarServiceList().get(0)).getTransactionBufferProvider(); + } + + /** + * Add a E2E test for the get last message ID. It tests 4 cases. + *

+ * 1. Only normal messages in the topic. + * 2. There are ongoing transactions, last message ID will not be updated until transaction end. + * 3. Aborted transaction will make the last message ID be updated as expected. + * 4. Committed transaction will make the last message ID be updated as expected. + *

+ */ + @Test + public void testGetLastMessageIdsWithOngoingTransactions() throws Exception { + // 1. Prepare environment + String topic = "persistent://" + NAMESPACE1 + "/testGetLastMessageIdsWithOngoingTransactions"; + String subName = "my-subscription"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscribe(); + + // 2. Test last max read position can be required correctly. + // 2.1 Case1: send 3 original messages. |1:0|1:1|1:2| + MessageIdImpl expectedLastMessageID = null; + for (int i = 0; i < 3; i++) { + expectedLastMessageID = (MessageIdImpl) producer.newMessage().send(); + } + assertMessageId(consumer, expectedLastMessageID, 0); + // 2.2 Case2: send 2 ongoing transactional messages and 2 original messages. + // |1:0|1:1|1:2|txn1->1:3|1:4|txn2->1:5|1:6|. + Transaction txn1 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + producer.newMessage(txn1).send(); + MessageIdImpl expectedLastMessageID1 = (MessageIdImpl) producer.newMessage().send(); + producer.newMessage(txn2).send(); + MessageIdImpl expectedLastMessageID2 = (MessageIdImpl) producer.newMessage().send(); + // 2.2.1 Last message ID will not change when txn1 and txn2 do not end. + assertMessageId(consumer, expectedLastMessageID, 0); + // 2.2.2 Last message ID will update to 1:4 when txn1 committed. + txn1.commit().get(5, TimeUnit.SECONDS); + assertMessageId(consumer, expectedLastMessageID1, 0); + // 2.2.3 Last message ID will update to 1:6 when txn2 aborted. + txn2.abort().get(5, TimeUnit.SECONDS); + // Todo: We can not ignore the marker's position in this fix. + assertMessageId(consumer, expectedLastMessageID2, 2); + } + + /** + * produce 3 messages and then trigger a ledger switch, + * then create a transaction and send a transactional message. + * As there are messages in the new ledger, the reader should be able to read the messages. + * But reader.hasMessageAvailable() returns false if the entry id of max read position is -1. 
+ * @throws Exception + */ + @Test + public void testGetLastMessageIdsWithOpenTransactionAtLedgerHead() throws Exception { + String topic = "persistent://" + NAMESPACE1 + "/testGetLastMessageIdsWithOpenTransactionAtLedgerHead"; + String subName = "my-subscription"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscribe(); + MessageId expectedLastMessageID = null; + for (int i = 0; i < 3; i++) { + expectedLastMessageID = producer.newMessage().value(String.valueOf(i).getBytes()).send(); + System.out.println("expectedLastMessageID: " + expectedLastMessageID); + } + triggerLedgerSwitch(topic); + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + producer.newMessage(txn).send(); + + Reader reader = pulsarClient.newReader() + .topic(topic) + .startMessageId(MessageId.earliest) + .create(); + assertTrue(reader.hasMessageAvailable()); + } + + private void triggerLedgerSwitch(String topicName) throws Exception{ + admin.topics().unload(topicName); + Awaitility.await().until(() -> { + CompletableFuture> topicFuture = + getPulsarServiceList().get(0).getBrokerService().getTopic(topicName, false); + if (!topicFuture.isDone() || topicFuture.isCompletedExceptionally()){ + return false; + } + Optional topicOptional = topicFuture.join(); + if (!topicOptional.isPresent()){ + return false; + } + PersistentTopic persistentTopic = (PersistentTopic) topicOptional.get(); + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + return managedLedger.getState() == ManagedLedgerImpl.State.LedgerOpened; + }); + } + + private void assertMessageId(Consumer consumer, MessageIdImpl expected, int entryOffset) throws Exception { + TopicMessageIdImpl actual = (TopicMessageIdImpl) consumer.getLastMessageIds().get(0); + assertEquals(expected.getEntryId(), actual.getEntryId() - entryOffset); + assertEquals(expected.getLedgerId(), actual.getLedgerId()); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java index 86606e3ae8eb1..1dc086dbe3470 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.transaction.buffer; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; @@ -42,15 +44,22 @@ import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferClientImpl; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferHandlerImpl; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TransactionBufferClient; import org.apache.pulsar.client.api.transaction.TransactionBufferClientException; 
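// The ClientCnx, ConnectionPool and PulsarClientImpl imports nearby support a stubbing pattern
// that the updated tests in this file repeat: a mocked PulsarClientImpl is now also stubbed for
// getCnxPool() and for the two-argument getConnection(String, int) overload, alongside the
// original single-argument getConnection(String). A minimal sketch of that setup, with local
// variable names chosen for illustration only:
//
//   PulsarClientImpl mockClient = mock(PulsarClientImpl.class);
//   when(mockClient.getCnxPool()).thenReturn(mock(ConnectionPool.class));
//   CompletableFuture<ClientCnx> cnxFuture = CompletableFuture.completedFuture(mock(ClientCnx.class));
//   when(mockClient.getConnection(anyString())).thenReturn(cnxFuture);
//   when(mockClient.getConnection(anyString(), anyInt())).thenReturn(cnxFuture);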
import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.api.proto.TxnAction; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; @@ -115,6 +124,50 @@ public void testCommitOnTopic() throws ExecutionException, InterruptedException } } + @Test + public void testRecoveryTransactionBufferWhenCommonTopicAndSystemTopicAtDifferentBroker() throws Exception { + for (int i = 0; i < getPulsarServiceList().size(); i++) { + getPulsarServiceList().get(i).getConfig().setTransactionBufferSegmentedSnapshotEnabled(true); + } + String topic1 = NAMESPACE1 + "/testRecoveryTransactionBufferWhenCommonTopicAndSystemTopicAtDifferentBroker"; + admin.tenants().createTenant(TENANT, + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NAMESPACE1, 4); + admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); + pulsarServiceList.get(0).getPulsarResources() + .getNamespaceResources() + .getPartitionedTopicResources() + .createPartitionedTopic(SystemTopicNames.TRANSACTION_COORDINATOR_ASSIGN, + new PartitionedTopicMetadata(3)); + assertTrue(admin.namespaces().getBundles(NAMESPACE1).getNumBundles() > 1); + for (int i = 0; true ; i++) { + topic1 = topic1 + i; + admin.topics().createNonPartitionedTopic(topic1); + String segmentTopicBroker = admin.lookups() + .lookupTopic(NAMESPACE1 + "/" + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + String indexTopicBroker = admin.lookups() + .lookupTopic(NAMESPACE1 + "/" + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT_INDEXES); + if (segmentTopicBroker.equals(indexTopicBroker)) { + String topicBroker = admin.lookups().lookupTopic(topic1); + if (!topicBroker.equals(segmentTopicBroker)) { + break; + } + } else { + break; + } + } + pulsarClient = PulsarClient.builder().serviceUrl(pulsarServiceList.get(0).getBrokerServiceUrl()) + .enableTransaction(true).build(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic1).create(); + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build().get(); + producer.newMessage(transaction).send(); + } + @Test public void testAbortOnTopic() throws ExecutionException, InterruptedException { List> futures = new ArrayList<>(); @@ -157,6 +210,8 @@ public void testAbortOnSubscription() throws ExecutionException, InterruptedExce @Test public void testTransactionBufferMetrics() throws Exception { + this.cleanup(); + this.setup(); //Test commit for (int i = 0; i < partitions; i++) { String topic = partitionedTopicName.getPartition(i).toString(); @@ -203,14 +258,21 @@ public void testTransactionBufferMetrics() throws Exception { assertEquals(pending.size(), 1); } + /** + * This is a flaky test. 
+ */ @Test public void testTransactionBufferClientTimeout() throws Exception { PulsarService pulsarService = pulsarServiceList.get(0); - PulsarClient mockClient = mock(PulsarClientImpl.class); + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString(), anyInt())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(any(), any(), anyInt())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); @@ -237,7 +299,9 @@ public PulsarClient answer(InvocationOnMock invocation) throws Throwable { ConcurrentSkipListMap outstandingRequests = (ConcurrentSkipListMap) field.get(transactionBufferHandler); - assertEquals(outstandingRequests.size(), 1); + Awaitility.await().atMost(1, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(outstandingRequests.size(), 1); + }); Awaitility.await().atLeast(2, TimeUnit.SECONDS).until(() -> { if (outstandingRequests.size() == 0) { @@ -257,11 +321,13 @@ public PulsarClient answer(InvocationOnMock invocation) throws Throwable { @Test public void testTransactionBufferChannelUnActive() throws PulsarServerException { PulsarService pulsarService = pulsarServiceList.get(0); - PulsarClient mockClient = mock(PulsarClientImpl.class); + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); - when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString(), anyInt())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java index e92cf29521e34..d1784f6a392bf 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.transaction.buffer; -import com.google.common.collect.Sets; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; @@ -30,7 +29,6 @@ import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -49,8 +47,6 @@ protected void setup() throws 
Exception { setUpBase(1, 16, null, 0); Awaitility.await().until(() -> ((PulsarClientImpl) pulsarClient) .getTcClient().getState() == TransactionCoordinatorClient.State.READY); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); } @AfterMethod(alwaysRun = true) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java index d6ec092c4456f..278cdbac1f09d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java @@ -24,6 +24,7 @@ import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferHandlerImpl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -31,8 +32,8 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.api.proto.TxnAction; import org.apache.pulsar.common.naming.NamespaceBundle; @@ -46,7 +47,9 @@ public class TransactionBufferHandlerImplTest { @Test public void testRequestCredits() throws PulsarServerException { - PulsarClient pulsarClient = mock(PulsarClientImpl.class); + PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); PulsarService pulsarService = mock(PulsarService.class); NamespaceService namespaceService = mock(NamespaceService.class); when(pulsarService.getNamespaceService()).thenReturn(namespaceService); @@ -54,7 +57,10 @@ public void testRequestCredits() throws PulsarServerException { when(namespaceService.getBundleAsync(any())).thenReturn(CompletableFuture.completedFuture(mock(NamespaceBundle.class))); Optional opData = Optional.empty(); when(namespaceService.getOwnerAsync(any())).thenReturn(CompletableFuture.completedFuture(opData)); - when(((PulsarClientImpl)pulsarClient).getConnection(anyString())).thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); + when(((PulsarClientImpl)pulsarClient).getConnection(anyString(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); + when(((PulsarClientImpl)pulsarClient).getConnection(anyString())) + .thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 1000, 3000)); doNothing().when(handler).endTxn(any()); doReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))).when(handler).getClientCnx(anyString()); @@ -75,7 +81,9 @@ public void testRequestCredits() throws PulsarServerException { @Test public void testMinRequestCredits() throws PulsarServerException { - PulsarClient pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool 
connectionPool = mock(ConnectionPool.class); + PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); PulsarService pulsarService = mock(PulsarService.class); when(pulsarService.getClient()).thenReturn(pulsarClient); TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 50, 3000)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java index 55d115905a35d..2297ae79fafa7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java @@ -213,14 +213,14 @@ public void testSyncNormalPositionWhenTBRecover(boolean clientEnableTransaction, position = topicTransactionBuffer.getMaxReadPosition(); assertEquals(position, PositionImpl.EARLIEST); + // change to None state can recover + field.set(topicTransactionBuffer, TopicTransactionBufferState.State.None); + // invoke recover Method method = TopicTransactionBuffer.class.getDeclaredMethod("recover"); method.setAccessible(true); method.invoke(topicTransactionBuffer); - // change to None state can recover - field.set(topicTransactionBuffer, TopicTransactionBufferState.State.None); - // recover success again checkTopicTransactionBufferState(clientEnableTransaction, topicTransactionBuffer); @@ -232,13 +232,15 @@ public void testSyncNormalPositionWhenTBRecover(boolean clientEnableTransaction, private void checkTopicTransactionBufferState(boolean clientEnableTransaction, TopicTransactionBuffer topicTransactionBuffer) { // recover success - Awaitility.await().until(() -> { + Awaitility.await().untilAsserted(() -> { if (clientEnableTransaction) { // recover success, client enable transaction will change to Ready State - return topicTransactionBuffer.getStats(false).state.equals(Ready.name()); + assertEquals(topicTransactionBuffer.getStats(false).state, + Ready.name()); } else { // recover success, client disable transaction will change to NoSnapshot State - return topicTransactionBuffer.getStats(false).state.equals(NoSnapshot.name()); + assertEquals(topicTransactionBuffer.getStats(false).state, + NoSnapshot.name()); } }); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java index ca8efe9d1cc79..b069d31dc6e0d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java @@ -63,6 +63,7 @@ import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.utils.ResourceUtils; import org.asynchttpclient.AsyncHttpClient; import org.asynchttpclient.BoundRequestBuilder; import org.asynchttpclient.DefaultAsyncHttpClient; @@ -84,10 +85,17 @@ public class WebServiceTest { private PulsarService pulsar; private String BROKER_LOOKUP_URL; private String BROKER_LOOKUP_URL_TLS; - private static final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/certificate/server.crt"; - private static final String TLS_SERVER_KEY_FILE_PATH = 
"./src/test/resources/certificate/server.key"; - private static final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/certificate/client.crt"; - private static final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/certificate/client.key"; + + private final static String CA_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); + private final static String BROKER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final static String BROKER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final static String CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final static String CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); @Test @@ -351,8 +359,8 @@ private String makeHttpRequest(boolean useTls, boolean useAuth) throws Exception if (useTls) { KeyManager[] keyManagers = null; if (useAuth) { - Certificate[] tlsCert = SecurityUtility.loadCertificatesFromPemFile(TLS_CLIENT_CERT_FILE_PATH); - PrivateKey tlsKey = SecurityUtility.loadPrivateKeyFromPemFile(TLS_CLIENT_KEY_FILE_PATH); + Certificate[] tlsCert = SecurityUtility.loadCertificatesFromPemFile(CLIENT_CERT_FILE_PATH); + PrivateKey tlsKey = SecurityUtility.loadPrivateKeyFromPemFile(CLIENT_KEY_FILE_PATH); KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); ks.load(null, null); @@ -403,10 +411,10 @@ private void setupEnv(boolean enableFilter, boolean enableTls, boolean enableAut config.setAuthenticationProviders(providers); config.setAuthorizationEnabled(false); config.setSuperUserRoles(roles); - config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + config.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + config.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); config.setTlsAllowInsecureConnection(allowInsecure); - config.setTlsTrustCertsFilePath(allowInsecure ? "" : TLS_CLIENT_CERT_FILE_PATH); + config.setTlsTrustCertsFilePath(allowInsecure ? 
"" : CA_CERT_FILE_PATH); config.setClusterName("local"); config.setAdvertisedAddress("localhost"); // TLS certificate expects localhost config.setMetadataStoreUrl("zk:localhost:2181"); @@ -433,8 +441,8 @@ private void setupEnv(boolean enableFilter, boolean enableTls, boolean enableAut serviceUrl = BROKER_URL_BASE_TLS; Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", CLIENT_CERT_FILE_PATH); + authParams.put("tlsKeyFile", CLIENT_KEY_FILE_PATH); adminBuilder.authentication(AuthenticationTls.class.getName(), authParams).allowTlsInsecureConnection(true); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java index 71e30c21b3c20..a189940bee8d5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java @@ -49,6 +49,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.zookeeper.KeeperException.Code; import org.awaitility.Awaitility; @@ -64,12 +65,6 @@ public class AuthenticatedProducerConsumerTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(AuthenticatedProducerConsumerTest.class); - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - private final String BASIC_CONF_FILE_PATH = "./src/test/resources/authentication/basic/.htpasswd"; private final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); @@ -88,9 +83,9 @@ protected void setup() throws Exception { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(true); conf.setTopicLevelPoliciesEnabled(false); @@ -104,7 +99,8 @@ protected void setup() throws Exception { conf.setBrokerClientTlsEnabled(true); conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); + "tlsCertFile:" + getTlsFileForClient("admin.cert") + + ",tlsKeyFile:" + getTlsFileForClient("admin.key-pk8")); Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); @@ 
-126,7 +122,7 @@ protected void setup() throws Exception { protected final void internalSetup(Authentication auth) throws Exception { admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true).authentication(auth) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).authentication(auth) .build()); String lookupUrl; // For http basic authentication test @@ -136,7 +132,7 @@ protected final void internalSetup(Authentication auth) throws Exception { lookupUrl = pulsar.getBrokerServiceUrlTls(); } replacePulsarClient(PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true).authentication(auth) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).authentication(auth) .enableTls(true)); } @@ -188,8 +184,8 @@ public void testTlsSyncProducerAndConsumer(int batchMessageDelayMs) throws Excep log.info("-- Starting {} test --", methodName); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); internalSetup(authTls); @@ -246,8 +242,8 @@ public void testAnonymousSyncProducerAndConsumer(int batchMessageDelayMs) throws log.info("-- Starting {} test --", methodName); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); internalSetup(authTls); @@ -291,8 +287,8 @@ public void testAuthenticationFilterNegative() throws Exception { log.info("-- Starting {} test --", methodName); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); internalSetup(authTls); @@ -324,8 +320,8 @@ public void testInternalServerExceptionOnLookup() throws Exception { log.info("-- Starting {} test --", methodName); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); internalSetup(authTls); @@ -362,8 +358,8 @@ public void testInternalServerExceptionOnLookup() throws Exception { @Test public void testDeleteAuthenticationPoliciesOfTopic() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); 
authTls.configure(authParams); internalSetup(authTls); @@ -424,7 +420,8 @@ public void testDeleteAuthenticationPoliciesOfTopic() throws Exception { admin.clusters().deleteCluster("test"); } - private final Authentication tlsAuth = new AuthenticationTls(TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH); + private final Authentication tlsAuth = + new AuthenticationTls(getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8")); private final Authentication tokenAuth = new AuthenticationToken(ADMIN_TOKEN); @DataProvider @@ -454,10 +451,9 @@ public void testTlsTransportWithAnyAuth(Supplier url, Authentication aut @Cleanup PulsarClient client = PulsarClient.builder().serviceUrl(url.get()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) - .tlsKeyFilePath(TLS_CLIENT_KEY_FILE_PATH) - .tlsCertificateFilePath(TLS_CLIENT_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) + .tlsKeyFilePath(getTlsFileForClient("admin.key-pk8")) + .tlsCertificateFilePath(getTlsFileForClient("admin.cert")) .authentication(auth) .allowTlsInsecureConnection(false) .enableTlsHostnameVerification(false) @@ -470,8 +466,8 @@ public void testTlsTransportWithAnyAuth(Supplier url, Authentication aut @Test public void testCleanupEmptyTopicAuthenticationMap() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); internalSetup(authTls); @@ -507,4 +503,48 @@ public void testCleanupEmptyTopicAuthenticationMap() throws Exception { .get().auth_policies.getTopicAuthentication().containsKey(topic)); }); } + + @Test + public void testCleanupEmptySubscriptionAuthenticationMap() throws Exception { + Map authParams = new HashMap<>(); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); + Authentication authTls = new AuthenticationTls(); + authTls.configure(authParams); + internalSetup(authTls); + + admin.clusters().createCluster("test", ClusterData.builder().build()); + admin.tenants().createTenant("p1", + new TenantInfoImpl(Collections.emptySet(), new HashSet<>(admin.clusters().getClusters()))); + String namespace = "p1/ns1"; + admin.namespaces().createNamespace("p1/ns1"); + + // grant permission1 and permission2 + String subscription = "test-sub-1"; + String role1 = "test-user-1"; + String role2 = "test-user-2"; + Set roles = new HashSet<>(); + roles.add(role1); + roles.add(role2); + admin.namespaces().grantPermissionOnSubscription(namespace, subscription, roles); + Optional policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + assertTrue(policies.isPresent()); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role1)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role2)); + + // revoke permission1 + admin.namespaces().revokePermissionOnSubscription(namespace, subscription, role1); + policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + 
assertTrue(policies.isPresent()); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + assertFalse(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role1)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role2)); + + // revoke permission2 + admin.namespaces().revokePermissionOnSubscription(namespace, subscription, role2); + policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + assertTrue(policies.isPresent()); + assertFalse(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java index f2631f591217b..65758aa522b80 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java @@ -30,6 +30,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationTls; import org.apache.pulsar.common.tls.PublicSuffixMatcher; import org.apache.pulsar.common.tls.TlsHostnameVerifier; +import org.assertj.core.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; @@ -46,17 +47,10 @@ public class AuthenticationTlsHostnameVerificationTest extends ProducerConsumerB private final String TLS_MIM_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/hn-verification/broker-cert.pem"; private final String TLS_MIM_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/hn-verification/broker-key.pem"; - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - private final String BASIC_CONF_FILE_PATH = "./src/test/resources/authentication/basic/.htpasswd"; private boolean hostnameVerificationEnabled = true; - private String clientTrustCertFilePath = TLS_TRUST_CERT_FILE_PATH; + private String clientTrustCertFilePath = CA_CERT_FILE_PATH; protected void setup() throws Exception { super.internalSetup(); @@ -81,7 +75,8 @@ protected void setup() throws Exception { conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_SERVER_KEY_FILE_PATH); + "tlsCertFile:" + getTlsFileForClient("admin.cert") + + ",tlsKeyFile:" + getTlsFileForClient("admin.key-pk8")); Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); @@ -100,8 +95,8 @@ protected void setup() throws Exception { protected void setupClient() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", 
getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); @@ -146,12 +141,15 @@ public void testTlsSyncProducerAndConsumerWithInvalidBrokerHost(boolean hostname clientTrustCertFilePath = TLS_MIM_TRUST_CERT_FILE_PATH; // setup broker cert which has CN = "pulsar" different than broker's hostname="localhost" conf.setBrokerServicePortTls(Optional.of(0)); + // disable topic level policy + conf.setTopicLevelPoliciesEnabled(false); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + conf.setAuthenticationProviders(Sets.newTreeSet(AuthenticationProviderTls.class.getName())); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setTlsCertificateFilePath(TLS_MIM_SERVER_CERT_FILE_PATH); conf.setTlsKeyFilePath(TLS_MIM_SERVER_KEY_FILE_PATH); conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_MIM_SERVER_KEY_FILE_PATH); + "tlsCertFile:" + getTlsFileForClient("admin.cert") + "," + "tlsKeyFile:" + TLS_MIM_SERVER_KEY_FILE_PATH); setup(); @@ -188,9 +186,11 @@ public void testTlsSyncProducerAndConsumerCorrectBrokerHost() throws Exception { // setup broker cert which has CN = "localhost" conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setAuthenticationProviders(Sets.newTreeSet(AuthenticationProviderTls.class.getName())); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTopicLevelPoliciesEnabled(false); setup(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java index 0ce3b7df07d1f..ba41848bf2c23 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java @@ -119,6 +119,7 @@ protected void cleanup() throws Exception { public void testProducerAndConsumerAuthorization() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(TestAuthorizationProvider.class.getName()); setup(); @@ -179,6 +180,7 @@ public void testProducerAndConsumerAuthorization() throws Exception { public void testSubscriberPermission() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setEnablePackagesManagement(true); conf.setPackagesManagementStorageProvider(MockedPackagesStorageProvider.class.getName()); conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); @@ -369,6 +371,7 @@ public void testSubscriberPermission() throws Exception { public void testClearBacklogPermission() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); setup(); @@ -610,6 +613,7 @@ public void testUpdateTopicPropertiesAuthorization() throws Exception { 
public void testSubscriptionPrefixAuthorization() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(TestAuthorizationProviderWithSubscriptionPrefix.class.getName()); setup(); @@ -694,6 +698,7 @@ public void testAuthData() throws Exception { public void testPermissionForProducerCreateInitialSubscription() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); setup(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java index 2af9f450dd3b1..0a4c5b7a318b3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java @@ -27,11 +27,12 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.github.benmanes.caffeine.cache.AsyncLoadingCache; import com.google.common.collect.Sets; import com.google.common.util.concurrent.MoreExecutors; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import io.prometheus.client.CollectorRegistry; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Field; @@ -41,10 +42,6 @@ import java.net.URI; import java.net.URL; import java.net.URLConnection; -import java.security.KeyStore; -import java.security.PrivateKey; -import java.security.SecureRandom; -import java.security.cert.Certificate; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -62,11 +59,9 @@ import java.util.stream.Collectors; import javax.naming.AuthenticationException; import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; import lombok.Cleanup; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; @@ -76,10 +71,19 @@ import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerWrapper; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceUnit; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.broker.namespace.OwnedBundle; +import org.apache.pulsar.broker.namespace.OwnershipCache; +import org.apache.pulsar.broker.namespace.ServiceUnitUtils; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.impl.BinaryProtoLookupService; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.NamespaceBundle; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; import 
org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; @@ -89,6 +93,7 @@ import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; +import org.apache.zookeeper.KeeperException; import org.asynchttpclient.AsyncCompletionHandler; import org.asynchttpclient.AsyncHttpClient; import org.asynchttpclient.AsyncHttpClientConfig; @@ -100,6 +105,7 @@ import org.asynchttpclient.Response; import org.asynchttpclient.channel.DefaultKeepAliveStrategy; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; @@ -167,7 +173,7 @@ public void testMultipleBrokerLookup() throws Exception { // mock: return Broker2 as a Least-loaded broker when leader receives request [3] doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager1)); @@ -300,7 +306,7 @@ public void testMultipleBrokerDifferentClusterLookup() throws Exception { // mock: return Broker2 as a Least-loaded broker when leader receives request doReturn(true).when(loadManager2).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager2)); /**** started broker-2 ****/ @@ -425,10 +431,6 @@ public void testPartitionTopicLookup() throws Exception { @Test public void testWebserviceServiceTls() throws Exception { log.info("-- Starting {} test --", methodName); - final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/certificate/server.crt"; - final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/certificate/server.key"; - final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/certificate/client.crt"; - final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/certificate/client.key"; /**** start broker-2 ****/ ServiceConfiguration conf2 = new ServiceConfiguration(); @@ -441,12 +443,15 @@ public void testWebserviceServiceTls() throws Exception { conf2.setWebServicePort(Optional.of(0)); conf2.setWebServicePortTls(Optional.of(0)); conf2.setAdvertisedAddress("localhost"); - conf2.setTlsAllowInsecureConnection(true); - conf2.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf2.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf2.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf2.setTlsRequireTrustedClientCertOnConnect(true); + conf2.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf2.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf2.setClusterName(conf.getClusterName()); conf2.setMetadataStoreUrl("zk:localhost:2181"); conf2.setConfigurationMetadataStoreUrl("zk:localhost:3181"); + // Not in use, and because TLS is not configured, it 
will fail to start + conf2.setSystemTopicEnabled(false); @Cleanup PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(conf2); @@ -455,10 +460,13 @@ public void testWebserviceServiceTls() throws Exception { // restart broker1 with tls enabled conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsAllowInsecureConnection(true); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setTlsRequireTrustedClientCertOnConnect(true); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setNumExecutorThreadPoolSize(5); + // Not in use, and because TLS is not configured, it will fail to start + conf.setSystemTopicEnabled(false); stopBroker(); startBroker(); pulsar.getLoadManager().get().writeLoadReportOnZookeeper(); @@ -478,7 +486,7 @@ public void testWebserviceServiceTls() throws Exception { // request [3] doReturn(true).when(loadManager1).isCentralized(); doReturn(true).when(loadManager2).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); @@ -492,18 +500,8 @@ public void testWebserviceServiceTls() throws Exception { final String lookupResourceUrl = "/lookup/v2/topic/persistent/my-property/my-ns/my-topic1"; // set client cert_key file - KeyManager[] keyManagers = null; - Certificate[] tlsCert = SecurityUtility.loadCertificatesFromPemFile(TLS_CLIENT_CERT_FILE_PATH); - PrivateKey tlsKey = SecurityUtility.loadPrivateKeyFromPemFile(TLS_CLIENT_KEY_FILE_PATH); - KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(null, null); - ks.setKeyEntry("private", tlsKey, "".toCharArray(), tlsCert); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, "".toCharArray()); - keyManagers = kmf.getKeyManagers(); - TrustManager[] trustManagers = InsecureTrustManagerFactory.INSTANCE.getTrustManagers(); - SSLContext sslCtx = SSLContext.getInstance("TLS"); - sslCtx.init(keyManagers, trustManagers, new SecureRandom()); + SSLContext sslCtx = SecurityUtility.createSslContext(false, CA_CERT_FILE_PATH, + getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8"), ""); HttpsURLConnection.setDefaultSSLSocketFactory(sslCtx.getSocketFactory()); // hit broker2 url @@ -521,6 +519,9 @@ public void testWebserviceServiceTls() throws Exception { loadManager1 = null; loadManager2 = null; + + conf.setBrokerServicePortTls(Optional.empty()); + conf.setWebServicePortTls(Optional.empty()); } /** @@ -579,7 +580,7 @@ public void testSplitUnloadLookupTest() throws Exception { loadManagerField.set(pulsar2.getNamespaceService(), new AtomicReference<>(loadManager2)); // mock: return Broker1 as a Least-loaded broker when leader receives request [3] doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); 
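// Throughout this file the mocked least-loaded broker is now identified by getBrokerId() rather
// than getSafeWebServiceAddress(); a minimal sketch of the pattern, presumably because the
// load-manager data is now keyed by broker ID:
//
//     doReturn(true).when(loadManager1).isCentralized();
//     SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getBrokerId(), null);
//     doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class));
//     loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager1));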
doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager1)); @@ -694,7 +695,7 @@ public void testModularLoadManagerSplitBundle() throws Exception { loadManagerField.set(pulsar2.getNamespaceService(), new AtomicReference<>(loadManager2)); // mock: return Broker1 as a Least-loaded broker when leader receives request [3] doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); Optional res = Optional.of(resourceUnit); doReturn(res).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); doReturn(res).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); @@ -760,7 +761,7 @@ public void testModularLoadManagerSplitBundle() throws Exception { }); // Unload the NamespacePolicies and AntiAffinity check. - String currentBroker = String.format("%s:%d", "localhost", pulsar.getListenPortHTTP().get()); + String currentBroker = pulsar.getBrokerId(); assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace,"0x00000000_0xffffffff", currentBroker)); assertTrue(loadManager.shouldAntiAffinityNamespaceUnload(namespace,"0x00000000_0xffffffff", currentBroker)); @@ -781,6 +782,160 @@ public void testModularLoadManagerSplitBundle() throws Exception { } } + @Test(timeOut = 20000) + public void testSkipSplitBundleIfOnlyOneBroker() throws Exception { + + log.info("-- Starting {} test --", methodName); + final String loadBalancerName = conf.getLoadManagerClassName(); + final int defaultNumberOfNamespaceBundles = conf.getDefaultNumberOfNamespaceBundles(); + final int loadBalancerNamespaceBundleMaxTopics = conf.getLoadBalancerNamespaceBundleMaxTopics(); + + final String namespace = "my-property/my-ns"; + final String topicName1 = BrokerTestUtil.newUniqueName("persistent://" + namespace + "/tp_"); + final String topicName2 = BrokerTestUtil.newUniqueName("persistent://" + namespace + "/tp_"); + try { + // configure broker with ModularLoadManager. + stopBroker(); + conf.setDefaultNumberOfNamespaceBundles(1); + conf.setLoadBalancerNamespaceBundleMaxTopics(1); + conf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); + startBroker(); + final ModularLoadManagerWrapper modularLoadManagerWrapper = + (ModularLoadManagerWrapper) pulsar.getLoadManager().get(); + final ModularLoadManagerImpl modularLoadManager = + (ModularLoadManagerImpl) modularLoadManagerWrapper.getLoadManager(); + + // Create one topic and trigger tasks, then verify there is only one bundle now. + Consumer consumer1 = pulsarClient.newConsumer().topic(topicName1) + .subscriptionName("my-subscriber-name").subscribe(); + List bounldes1 = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getBundles(NamespaceName.get(namespace)).getBundles(); + pulsar.getBrokerService().updateRates(); + pulsar.getLoadManager().get().writeLoadReportOnZookeeper(); + pulsar.getLoadManager().get().writeResourceQuotasToZooKeeper(); + modularLoadManager.updateAll(); + assertEquals(bounldes1.size(), 1); + + // Create the second topic and trigger tasks, then verify the split task will be skipped. 
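// The load-manager evaluation pass is driven explicitly, as in the first half of this test; a
// sketch of the sequence (all calls taken from the code above):
//
//     pulsar.getBrokerService().updateRates();                        // refresh per-topic rates
//     pulsar.getLoadManager().get().writeLoadReportOnZookeeper();     // publish broker load data
//     pulsar.getLoadManager().get().writeResourceQuotasToZooKeeper();
//     modularLoadManager.updateAll();                                 // runs the split/unload checks
//
// With loadBalancerNamespaceBundleMaxTopics=1 a second topic would normally trigger a split, but
// with a single broker the split is expected to be skipped, so the bundle count must stay at 1.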
+ Consumer consumer2 = pulsarClient.newConsumer().topic(topicName2) + .subscriptionName("my-subscriber-name").subscribe(); + pulsar.getBrokerService().updateRates(); + pulsar.getLoadManager().get().writeLoadReportOnZookeeper(); + pulsar.getLoadManager().get().writeResourceQuotasToZooKeeper(); + modularLoadManager.updateAll(); + List bounldes2 = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getBundles(NamespaceName.get(namespace)).getBundles(); + assertEquals(bounldes2.size(), 1); + + consumer1.close(); + consumer2.close(); + admin.topics().delete(topicName1, false); + admin.topics().delete(topicName2, false); + } finally { + conf.setDefaultNumberOfNamespaceBundles(defaultNumberOfNamespaceBundles); + conf.setLoadBalancerNamespaceBundleMaxTopics(loadBalancerNamespaceBundleMaxTopics); + conf.setLoadManagerClassName(loadBalancerName); + } + } + + @Test + public void testMergeGetPartitionedMetadataRequests() throws Exception { + // Assert the lookup service is a "BinaryProtoLookupService". + final PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + + final String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final int topicPartitions = 10; + admin.topics().createPartitionedTopic(tpName, topicPartitions); + + // Verify the request is works after merge the requests. + List> futures = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + futures.add(lookupService.getPartitionedTopicMetadata(TopicName.get(tpName))); + } + for (CompletableFuture future : futures) { + assertEquals(future.join().partitions, topicPartitions); + } + + // cleanup. + admin.topics().deletePartitionedTopic(tpName); + } + + @Test + public void testMergeLookupRequests() throws Exception { + // Assert the lookup service is a "BinaryProtoLookupService". + final PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + + final String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + + // Create 1 producer and 100 consumers. + List> producers = new ArrayList<>(); + List> consumers = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + producers.add(pulsarClient.newProducer(Schema.STRING).topic(tpName).create()); + } + for (int i = 0; i < 20; i++) { + consumers.add(pulsarClient.newConsumer(Schema.STRING).topic(tpName).subscriptionName("s" + i).subscribe()); + } + + // Verify the lookup count will be smaller than before improve. + int lookupCountBeforeUnload = calculateLookupRequestCount(); + admin.namespaces().unload(TopicName.get(tpName).getNamespace()); + Awaitility.await().untilAsserted(() -> { + for (Producer p : producers) { + assertEquals(WhiteboxImpl.getInternalState(p, "state").toString(), "Ready"); + } + for (Consumer c : consumers) { + assertEquals(WhiteboxImpl.getInternalState(c, "state").toString(), "Ready"); + } + }); + int lookupCountAfterUnload = calculateLookupRequestCount(); + log.info("lookup count before unload: {}, after unload: {}", lookupCountBeforeUnload, lookupCountAfterUnload); + assertTrue(lookupCountAfterUnload < lookupCountBeforeUnload * 2, + "the lookup count should be smaller than before improve"); + + // Verify the producers and consumers is still works. 
+ List messagesSent = new ArrayList<>(); + int index = 0; + for (Producer producer: producers) { + String message = Integer.valueOf(index++).toString(); + producer.send(message); + messagesSent.add(message); + } + HashSet messagesReceived = new HashSet<>(); + for (Consumer consumer : consumers) { + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + } + assertEquals(messagesReceived.size(), producers.size()); + + // cleanup. + for (Producer producer: producers) { + producer.close(); + } + for (Consumer consumer : consumers) { + consumer.close(); + } + admin.topics().delete(tpName); + } + + private int calculateLookupRequestCount() throws Exception { + int failures = CollectorRegistry.defaultRegistry.getSampleValue("pulsar_broker_lookup_failures_total") + .intValue(); + int answers = CollectorRegistry.defaultRegistry.getSampleValue("pulsar_broker_lookup_answers_total") + .intValue(); + return failures + answers; + } + @Test(timeOut = 10000) public void testPartitionedMetadataWithDeprecatedVersion() throws Exception { @@ -798,6 +953,8 @@ public void testPartitionedMetadataWithDeprecatedVersion() throws Exception { admin.topics().createPartitionedTopic(dest.toString(), totalPartitions); stopBroker(); + conf.setBrokerServicePortTls(Optional.empty()); + conf.setWebServicePortTls(Optional.empty()); conf.setClientLibraryVersionCheckEnabled(true); startBroker(); @@ -955,4 +1112,139 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat return "invalid"; } } + + @Test + public void testLookupConnectionNotCloseIfGetUnloadingExOrMetadataEx() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + Producer producer = pulsarClientImpl.newProducer(Schema.STRING).topic(tpName).create(); + Consumer consumer = pulsarClientImpl.newConsumer(Schema.STRING).topic(tpName) + .subscriptionName("s1").isAckReceiptEnabled(true).subscribe(); + LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + ClientCnx lookupConnection = pulsarClientImpl.getCnxPool().getConnection(lookupService.resolveHost()).join(); + + // Verify the socket will not be closed if the bundle is unloading. + BundleOfTopic bundleOfTopic = new BundleOfTopic(tpName); + bundleOfTopic.setBundleIsUnloading(); + try { + lookupService.getBroker(TopicName.get(tpName)).get(); + fail("It should failed due to the namespace bundle is unloading."); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("is being unloaded")); + } + // Do unload topic, trigger producer & consumer reconnection. + pulsar.getBrokerService().getTopic(tpName, false).join().get().close(true); + assertTrue(lookupConnection.ctx().channel().isActive()); + bundleOfTopic.setBundleIsNotUnloading(); + // Assert producer & consumer could reconnect successful. + producer.send("1"); + HashSet messagesReceived = new HashSet<>(); + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + assertTrue(messagesReceived.contains("1")); + + // Verify the socket will not be closed if get a metadata ex. 
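// The core assertion pattern in these lookup-failure tests, sketched with the names used above:
// grab the shared lookup connection once, provoke a retriable lookup failure, and verify the same
// channel is still open afterwards.
//
//     ClientCnx lookupConnection = pulsarClientImpl.getCnxPool()
//             .getConnection(lookupService.resolveHost()).join();
//     try {
//         lookupService.getBroker(TopicName.get(tpName)).get();   // expected to fail while unloading
//     } catch (Exception expected) {
//         // retriable error, e.g. "is being unloaded" or a metadata failure
//     }
//     assertTrue(lookupConnection.ctx().channel().isActive());    // the connection is not torn down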
+ bundleOfTopic.releaseBundleLockAndMakeAcquireFail(); + try { + lookupService.getBroker(TopicName.get(tpName)).get(); + fail("It should failed due to the acquire bundle lock fail."); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("OperationTimeout")); + } + // Do unload topic, trigger producer & consumer reconnection. + pulsar.getBrokerService().getTopic(tpName, false).join().get().close(true); + assertTrue(lookupConnection.ctx().channel().isActive()); + bundleOfTopic.makeAcquireBundleLockSuccess(); + // Assert producer could reconnect successful. + producer.send("2"); + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + assertTrue(messagesReceived.contains("2")); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(tpName); + } + + private class BundleOfTopic { + + private NamespaceBundle namespaceBundle; + private OwnershipCache ownershipCache; + private AsyncLoadingCache ownedBundlesCache; + + public BundleOfTopic(String tpName) { + namespaceBundle = pulsar.getNamespaceService().getBundle(TopicName.get(tpName)); + ownershipCache = pulsar.getNamespaceService().getOwnershipCache(); + ownedBundlesCache = WhiteboxImpl.getInternalState(ownershipCache, "ownedBundlesCache"); + } + + private void setBundleIsUnloading() { + ownedBundlesCache.get(namespaceBundle).join().setActive(false); + } + + private void setBundleIsNotUnloading() { + ownedBundlesCache.get(namespaceBundle).join().setActive(true); + } + + private void releaseBundleLockAndMakeAcquireFail() throws Exception { + ownedBundlesCache.synchronous().invalidateAll(); + mockZooKeeper.delete(ServiceUnitUtils.path(namespaceBundle), -1); + mockZooKeeper.setAlwaysFail(KeeperException.Code.OPERATIONTIMEOUT); + } + + private void makeAcquireBundleLockSuccess() throws Exception { + mockZooKeeper.unsetAlwaysFail(); + } + } + + @Test(timeOut = 30000) + public void testLookupConnectionNotCloseIfFailedToAcquireOwnershipOfBundle() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + final var pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final var cache = pulsar.getNamespaceService().getOwnershipCache(); + final var bundle = pulsar.getNamespaceService().getBundle(TopicName.get(tpName)); + final var value = cache.getOwnerAsync(bundle).get().orElse(null); + assertNotNull(value); + + cache.invalidateLocalOwnerCache(); + final var lock = pulsar.getCoordinationService().getLockManager(NamespaceEphemeralData.class) + .acquireLock(ServiceUnitUtils.path(bundle), new NamespaceEphemeralData()).join(); + lock.updateValue(null); + log.info("Updated bundle {} with null", bundle.getBundleRange()); + + // wait for the system topic reader to __change_events is closed, otherwise the test will be affected + Thread.sleep(500); + + final var future = pulsarClientImpl.getLookup().getBroker(TopicName.get(tpName)); + final var cnx = pulsarClientImpl.getCnxPool().getConnections().stream().findAny() + .map(CompletableFuture::join).orElse(null); + assertNotNull(cnx); + + try { + future.get(); + fail(); + } catch (ExecutionException e) { + log.info("getBroker failed with {}: {}", e.getCause().getClass().getName(), e.getMessage()); + assertTrue(e.getCause() instanceof PulsarClientException.BrokerMetadataException); + assertTrue(cnx.ctx().channel().isActive()); + lock.updateValue(value); + lock.release(); + 
assertTrue(e.getMessage().contains("Failed to acquire ownership")); + pulsarClientImpl.getLookup().getBroker(TopicName.get(tpName)).get(); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java index 186bf9d736e45..d716d5a806392 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java @@ -22,6 +22,7 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.expectThrows; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -37,15 +38,9 @@ @Test(groups = "broker-api") public class ClientAuthenticationTlsTest extends ProducerConsumerBase { - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; private final Authentication authenticationTls = - new AuthenticationTls(TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH); + new AuthenticationTls(getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8")); @Override protected void doInitConf() throws Exception { @@ -56,18 +51,20 @@ protected void doInitConf() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); conf.setAuthenticationProviders(providers); - - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + conf.setWebServicePortTls(Optional.of(0)); + conf.setBrokerServicePortTls(Optional.of(0)); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setTlsAllowInsecureConnection(false); conf.setBrokerClientTlsEnabled(true); conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); - conf.setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + "tlsCertFile:" + getTlsFileForClient("admin.cert") + + ",tlsKeyFile:" + getTlsFileForClient("admin.key-pk8")); + conf.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); } @BeforeClass(alwaysRun = true) @@ -94,7 +91,7 @@ public void testAdminWithTrustCert() throws PulsarClientException, PulsarAdminEx @Cleanup PulsarAdmin pulsarAdmin = PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) .sslProvider("JDK") - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .build(); pulsarAdmin.clusters().getClusters(); } @@ -105,7 +102,7 @@ public void testAdminWithFull() throws PulsarClientException, PulsarAdminExcepti PulsarAdmin pulsarAdmin = PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) 
.sslProvider("JDK") .authentication(authenticationTls) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .build(); pulsarAdmin.clusters().getClusters(); } @@ -139,7 +136,7 @@ public void testClientWithTrustCert() throws PulsarClientException, PulsarAdminE PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPulsar().getBrokerServiceUrlTls()) .sslProvider("JDK") .operationTimeout(3, TimeUnit.SECONDS) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .build(); @Cleanup Producer ignored = pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create(); @@ -152,7 +149,7 @@ public void testClientWithFull() throws PulsarClientException, PulsarAdminExcept .sslProvider("JDK") .operationTimeout(3, TimeUnit.SECONDS) .authentication(authenticationTls) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .build(); @Cleanup Producer ignored = pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java index 5e3731fbf24fc..2a0cb3187d208 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java @@ -29,6 +29,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -46,9 +47,10 @@ import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -@Test(groups = "flaky") +@Test(groups = "broker-impl") public class DeadLetterTopicTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(DeadLetterTopicTest.class); @@ -56,6 +58,7 @@ public class DeadLetterTopicTest extends ProducerConsumerBase { @BeforeMethod(alwaysRun = true) @Override protected void setup() throws Exception { + this.conf.setMaxMessageSize(5 * 1024); super.internalSetup(); super.producerBaseSetup(); } @@ -66,6 +69,15 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + private String createMessagePayload(int size) { + StringBuilder str = new StringBuilder(); + Random rand = new Random(); + for (int i = 0; i < size; i++) { + str.append(rand.nextInt(10)); + } + return str.toString(); + } + @Test public void testDeadLetterTopicWithMessageKey() throws Exception { final String topic = "persistent://my-property/my-ns/dead-letter-topic"; @@ -125,9 +137,13 @@ public void testDeadLetterTopicWithMessageKey() throws Exception { consumer.close(); } + @DataProvider(name = "produceLargeMessages") + public Object[][] produceLargeMessages() { + return new Object[][] { { false }, { true } }; + } - @Test(groups = "quarantine") - public void testDeadLetterTopic() throws Exception { + @Test(dataProvider = "produceLargeMessages") + public void testDeadLetterTopic(boolean produceLargeMessages) throws Exception { final String topic = "persistent://my-property/my-ns/dead-letter-topic"; final int maxRedeliveryCount = 2; @@ -154,28 +170,44 @@ public void testDeadLetterTopic() throws Exception { Producer producer = pulsarClient.newProducer(Schema.BYTES) 
.topic(topic) + .enableChunking(produceLargeMessages) + .enableBatching(!produceLargeMessages) .create(); + Map messageContent = new HashMap<>(); + for (int i = 0; i < sendMessages; i++) { - producer.send(String.format("Hello Pulsar [%d]", i).getBytes()); + String data; + if (!produceLargeMessages) { + data = String.format("Hello Pulsar [%d]", i); + } else { + data = createMessagePayload(1024 * 10); + } + producer.newMessage().key(String.valueOf(i)).value(data.getBytes()).send(); + messageContent.put(i, data); } producer.close(); int totalReceived = 0; do { - Message message = consumer.receive(); - log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + Message message = consumer.receive(5, TimeUnit.SECONDS); + assertNotNull(message, "The consumer should be able to receive messages."); + log.info("consumer received message : {}", message.getMessageId()); totalReceived++; } while (totalReceived < sendMessages * (maxRedeliveryCount + 1)); int totalInDeadLetter = 0; do { - Message message = deadLetterConsumer.receive(); - log.info("dead letter consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + Message message = deadLetterConsumer.receive(5, TimeUnit.SECONDS); + assertNotNull(message, "the deadLetterConsumer should receive messages."); + assertEquals(new String(message.getData()), messageContent.get(Integer.parseInt(message.getKey()))); + messageContent.remove(Integer.parseInt(message.getKey())); + log.info("dead letter consumer received message : {}", message.getMessageId()); deadLetterConsumer.acknowledge(message); totalInDeadLetter++; } while (totalInDeadLetter < sendMessages); + assertTrue(messageContent.isEmpty()); deadLetterConsumer.close(); consumer.close(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java index fc103a46027c0..bd0119823fd95 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java @@ -692,8 +692,8 @@ public void testBlockBrokerDispatching() { try { final int waitMills = 500; final int maxUnAckPerBroker = 200; - final double unAckMsgPercentagePerDispatcher = 10; - int maxUnAckPerDispatcher = (int) ((maxUnAckPerBroker * unAckMsgPercentagePerDispatcher) / 100); // 200 * + final double unAckMsgPercentagePerDispatcher = 0.1; + int maxUnAckPerDispatcher = (int) (maxUnAckPerBroker * unAckMsgPercentagePerDispatcher); // 200 * // 10% = 20 // messages pulsar.getConfiguration().setMaxUnackedMessagesPerBroker(maxUnAckPerBroker); @@ -907,8 +907,8 @@ public void testBrokerDispatchBlockAndSubAckBackRequiredMsgs() { .getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked(); try { final int maxUnAckPerBroker = 200; - final double unAckMsgPercentagePerDispatcher = 10; - int maxUnAckPerDispatcher = (int) ((maxUnAckPerBroker * unAckMsgPercentagePerDispatcher) / 100); // 200 * + final double unAckMsgPercentagePerDispatcher = 0.1; + int maxUnAckPerDispatcher = (int) (maxUnAckPerBroker * unAckMsgPercentagePerDispatcher); // 200 * // 10% = 20 // messages pulsar.getConfiguration().setMaxUnackedMessagesPerBroker(maxUnAckPerBroker); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java new file mode 100644 index 0000000000000..d29dd4f7061b8 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.api; + +import io.netty.channel.EventLoopGroup; +import java.util.concurrent.ThreadFactory; +import org.apache.pulsar.client.impl.ClientBuilderImpl; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.util.ExecutorProvider; +import org.apache.pulsar.common.util.netty.EventLoopUtil; + +public class InjectedClientCnxClientBuilder { + + public static PulsarClientImpl create(final ClientBuilderImpl clientBuilder, + final ClientCnxFactory clientCnxFactory) throws Exception { + ClientConfigurationData conf = clientBuilder.getClientConfigurationData(); + ThreadFactory threadFactory = new ExecutorProvider + .ExtendedThreadFactory("pulsar-client-io", Thread.currentThread().isDaemon()); + EventLoopGroup eventLoopGroup = + EventLoopUtil.newEventLoopGroup(conf.getNumIoThreads(), conf.isEnableBusyWait(), threadFactory); + + // Inject into ClientCnx. 
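// An illustrative usage sketch for this new helper: a test can swap in a ClientCnx subclass to
// inject faults, assuming ClientCnx exposes a (conf, eventLoopGroup) constructor and `lookupUrl`
// is whatever service URL the test targets:
//
//     ClientBuilderImpl builder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl);
//     PulsarClientImpl client = InjectedClientCnxClientBuilder.create(builder,
//             (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) {
//                 // override selected command handlers here to drop or delay responses
//             });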
+ ConnectionPool pool = new ConnectionPool(conf, eventLoopGroup, + () -> clientCnxFactory.generate(conf, eventLoopGroup)); + + return new PulsarClientImpl(conf, eventLoopGroup, pool); + } + + public interface ClientCnxFactory { + + ClientCnx generate(ClientConfigurationData conf, EventLoopGroup eventLoopGroup); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java index 7b617a6a192d8..18fb141be3178 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java @@ -114,6 +114,7 @@ public Object[][] topicDomainProvider() { @BeforeClass(alwaysRun = true) @Override protected void setup() throws Exception { + this.conf.setUnblockStuckSubscriptionEnabled(true); super.internalSetup(); super.producerBaseSetup(); this.conf.setSubscriptionKeySharedUseConsistentHashing(true); @@ -1554,4 +1555,79 @@ public void testStickyKeyRangesRestartConsumers() throws Exception { producerFuture.get(); } + + @Test + public void testContinueDispatchMessagesWhenMessageDelayed() throws Exception { + int delayedMessages = 40; + int messages = 40; + int sum = 0; + final String topic = "persistent://public/default/key_shared-" + UUID.randomUUID(); + final String subName = "my-sub"; + + @Cleanup + Consumer consumer1 = pulsarClient.newConsumer(Schema.INT32) + .topic(topic) + .subscriptionName(subName) + .receiverQueueSize(10) + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topic) + .create(); + + for (int i = 0; i < delayedMessages; i++) { + MessageId messageId = producer.newMessage() + .key(String.valueOf(random.nextInt(NUMBER_OF_KEYS))) + .value(100 + i) + .deliverAfter(10, TimeUnit.SECONDS) + .send(); + log.info("Published delayed message :{}", messageId); + } + + for (int i = 0; i < messages; i++) { + MessageId messageId = producer.newMessage() + .key(String.valueOf(random.nextInt(NUMBER_OF_KEYS))) + .value(i) + .send(); + log.info("Published message :{}", messageId); + } + + @Cleanup + Consumer consumer2 = pulsarClient.newConsumer(Schema.INT32) + .topic(topic) + .subscriptionName(subName) + .receiverQueueSize(30) + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + + for (int i = 0; i < delayedMessages + messages; i++) { + Message msg = consumer1.receive(30, TimeUnit.SECONDS); + if (msg != null) { + log.info("c1 message: {}, {}", msg.getValue(), msg.getMessageId()); + consumer1.acknowledge(msg); + } else { + break; + } + sum++; + } + + log.info("Got {} messages...", sum); + + int remaining = delayedMessages + messages - sum; + for (int i = 0; i < remaining; i++) { + Message msg = consumer2.receive(30, TimeUnit.SECONDS); + if (msg != null) { + log.info("c2 message: {}, {}", msg.getValue(), msg.getMessageId()); + consumer2.acknowledge(msg); + } else { + break; + } + sum++; + } + + log.info("Got {} other messages...", sum); + Assert.assertEquals(sum, delayedMessages + messages); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java index b8ea87ab4016e..bb8bab29ad9ef 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java @@ -76,6 +76,11 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Override + protected void customizeNewPulsarClientBuilder(ClientBuilder clientBuilder) { + clientBuilder.ioThreads(4).connectionsPerBroker(4); + } + // test that reproduces the issue https://github.com/apache/pulsar/issues/12024 // where closing the consumer leads to an endless receive loop @Test @@ -351,4 +356,51 @@ public int choosePartition(Message msg, TopicMetadata metadata) { } consumer.close(); } + + @Test(invocationCount = 10, timeOut = 30000) + public void testMultipleIOThreads() throws PulsarAdminException, PulsarClientException { + final var topic = TopicName.get(newTopicName()).toString(); + final var numPartitions = 100; + admin.topics().createPartitionedTopic(topic, numPartitions); + for (int i = 0; i < 100; i++) { + admin.topics().createNonPartitionedTopic(topic + "-" + i); + } + @Cleanup + final var consumer = pulsarClient.newConsumer(Schema.INT32).topicsPattern(topic + ".*") + .subscriptionName("sub").subscribe(); + assertTrue(consumer instanceof MultiTopicsConsumerImpl); + assertTrue(consumer.isConnected()); + } + + @Test(timeOut = 30000) + public void testSubscriptionNotFound() throws PulsarAdminException, PulsarClientException { + final var topic1 = newTopicName(); + final var topic2 = newTopicName(); + + pulsar.getConfiguration().setAllowAutoSubscriptionCreation(false); + + try { + final var singleTopicConsumer = pulsarClient.newConsumer() + .topic(topic1) + .subscriptionName("sub-1") + .isAckReceiptEnabled(true) + .subscribe(); + assertTrue(singleTopicConsumer instanceof ConsumerImpl); + } catch (Throwable t) { + assertTrue(t.getCause().getCause() instanceof PulsarClientException.SubscriptionNotFoundException); + } + + try { + final var multiTopicsConsumer = pulsarClient.newConsumer() + .topics(List.of(topic1, topic2)) + .subscriptionName("sub-2") + .isAckReceiptEnabled(true) + .subscribe(); + assertTrue(multiTopicsConsumer instanceof MultiTopicsConsumerImpl); + } catch (Throwable t) { + assertTrue(t.getCause().getCause() instanceof PulsarClientException.SubscriptionNotFoundException); + } + + pulsar.getConfiguration().setAllowAutoSubscriptionCreation(true); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java index 2fc8aebf64a4a..81d65b192049b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java @@ -195,7 +195,7 @@ protected void setup() throws Exception { Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); conf.setSuperUserRoles(superUserRoles); - + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationEnabled(true); conf.setAuthenticationEnabled(true); Set providersClassNames = Sets.newHashSet(MutualAuthenticationProvider.class.getName()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/NonDurableSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/NonDurableSubscriptionTest.java index 52b1498ef7de9..f9c259a97473c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/NonDurableSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/NonDurableSubscriptionTest.java @@ -19,24 +19,40 @@ package 
org.apache.pulsar.client.api; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNotNull; -import static org.testng.AssertJUnit.assertFalse; import static org.testng.AssertJUnit.assertNull; import static org.testng.AssertJUnit.assertTrue; import java.lang.reflect.Field; import java.util.UUID; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.PulsarChannelInitializer; import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.service.nonpersistent.NonPersistentSubscription; import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.common.api.proto.CommandFlow; +import org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats; +import org.apache.pulsar.common.policies.data.SubscriptionStats; +import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -45,7 +61,7 @@ @Test(groups = "broker-api") @Slf4j -public class NonDurableSubscriptionTest extends ProducerConsumerBase { +public class NonDurableSubscriptionTest extends ProducerConsumerBase { private final AtomicInteger numFlow = new AtomicInteger(0); @@ -297,4 +313,400 @@ public void testFlowCountForMultiTopics() throws Exception { assertEquals(numFlow.get(), numPartitions); } + + private void trimLedgers(final String tpName) { + // Wait for topic loading. + org.awaitility.Awaitility.await().untilAsserted(() -> { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get(); + assertNotNull(persistentTopic); + }); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get(); + ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + CompletableFuture trimLedgersTask = new CompletableFuture<>(); + ml.trimConsumedLedgersInBackground(trimLedgersTask); + trimLedgersTask.join(); + } + + private void switchLedgerManually(final String tpName) throws Exception { + Method ledgerClosed = + ManagedLedgerImpl.class.getDeclaredMethod("ledgerClosed", new Class[]{LedgerHandle.class}); + Method createLedgerAfterClosed = + ManagedLedgerImpl.class.getDeclaredMethod("createLedgerAfterClosed", new Class[0]); + ledgerClosed.setAccessible(true); + createLedgerAfterClosed.setAccessible(true); + + // Wait for topic create. 
+ org.awaitility.Awaitility.await().untilAsserted(() -> { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get(); + assertNotNull(persistentTopic); + }); + + // Switch ledger. + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get(); + ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + LedgerHandle currentLedger1 = WhiteboxImpl.getInternalState(ml, "currentLedger"); + ledgerClosed.invoke(ml, new Object[]{currentLedger1}); + createLedgerAfterClosed.invoke(ml, new Object[0]); + Awaitility.await().untilAsserted(() -> { + LedgerHandle currentLedger2 = WhiteboxImpl.getInternalState(ml, "currentLedger"); + assertNotEquals(currentLedger1.getId(), currentLedger2.getId()); + }); + } + + @Test + public void testHasMessageAvailableIfIncomingQueueNotEmpty() throws Exception { + final String nonDurableCursor = "non-durable-cursor"; + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + Reader reader = pulsarClient.newReader(Schema.STRING).topic(topicName).receiverQueueSize(1) + .subscriptionName(nonDurableCursor).startMessageId(MessageIdImpl.earliest).create(); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + MessageIdImpl msgSent = (MessageIdImpl) producer.send("1"); + + // Trigger a ledger switch. + // Then trigger a trim-ledgers task, and verify that trimming completes successfully. + switchLedgerManually(topicName); + trimLedgers(topicName); + + // Since there is one message in the incoming queue, the method "reader.hasMessageAvailable" should return + // true. + boolean hasMessageAvailable = reader.hasMessageAvailable(); + Message msgReceived = reader.readNext(2, TimeUnit.SECONDS); + if (msgReceived == null) { + assertFalse(hasMessageAvailable); + } else { + log.info("receive msg: {}", msgReceived.getValue()); + assertTrue(hasMessageAvailable); + assertEquals(msgReceived.getValue(), "1"); + } + + // cleanup. + reader.close(); + producer.close(); + admin.topics().delete(topicName); + } + + @Test + public void testInitReaderAtSpecifiedPosition() throws Exception { + String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(topicName); + admin.topics().createSubscription(topicName, "s0", MessageId.earliest); + + // Trigger 5 ledgers. + ArrayList ledgers = new ArrayList<>(); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + for (int i = 0; i < 5; i++) { + MessageIdImpl msgId = (MessageIdImpl) producer.send("1"); + ledgers.add(msgId.getLedgerId()); + admin.topics().unload(topicName); + } + producer.close(); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + LedgerHandle currentLedger = WhiteboxImpl.getInternalState(ml, "currentLedger"); + log.info("currentLedger: {}", currentLedger.getId()); + + // Less than the first ledger, and entry id is "-1".
+ log.info("start test s1"); + String s1 = "s1"; + MessageIdImpl startMessageId1 = new MessageIdImpl(ledgers.get(0) - 1, -1, -1); + Reader reader1 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s1) + .receiverQueueSize(0).startMessageId(startMessageId1).create(); + ManagedLedgerInternalStats.CursorStats cursor1 = admin.topics().getInternalStats(topicName).cursors.get(s1); + log.info("cursor1 readPosition: {}, markDeletedPosition: {}", cursor1.readPosition, cursor1.markDeletePosition); + PositionImpl p1 = parseReadPosition(cursor1); + assertEquals(p1.getLedgerId(), ledgers.get(0)); + assertEquals(p1.getEntryId(), 0); + reader1.close(); + + // Less than the first ledger, and entry id is Long.MAX_VALUE. + log.info("start test s2"); + String s2 = "s2"; + MessageIdImpl startMessageId2 = new MessageIdImpl(ledgers.get(0) - 1, Long.MAX_VALUE, -1); + Reader reader2 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s2) + .receiverQueueSize(0).startMessageId(startMessageId2).create(); + ManagedLedgerInternalStats.CursorStats cursor2 = admin.topics().getInternalStats(topicName).cursors.get(s2); + log.info("cursor2 readPosition: {}, markDeletedPosition: {}", cursor2.readPosition, cursor2.markDeletePosition); + PositionImpl p2 = parseReadPosition(cursor2); + assertEquals(p2.getLedgerId(), ledgers.get(0)); + assertEquals(p2.getEntryId(), 0); + reader2.close(); + + // Larger than the latest ledger, and entry id is "-1". + log.info("start test s3"); + String s3 = "s3"; + MessageIdImpl startMessageId3 = new MessageIdImpl(currentLedger.getId() + 1, -1, -1); + Reader reader3 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s3) + .receiverQueueSize(0).startMessageId(startMessageId3).create(); + ManagedLedgerInternalStats.CursorStats cursor3 = admin.topics().getInternalStats(topicName).cursors.get(s3); + log.info("cursor3 readPosition: {}, markDeletedPosition: {}", cursor3.readPosition, cursor3.markDeletePosition); + PositionImpl p3 = parseReadPosition(cursor3); + assertEquals(p3.getLedgerId(), currentLedger.getId()); + assertEquals(p3.getEntryId(), 0); + reader3.close(); + + // Larger than the latest ledger, and entry id is Long.MAX_VALUE. + log.info("start test s4"); + String s4 = "s4"; + MessageIdImpl startMessageId4 = new MessageIdImpl(currentLedger.getId() + 1, Long.MAX_VALUE, -1); + Reader reader4 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s4) + .receiverQueueSize(0).startMessageId(startMessageId4).create(); + ManagedLedgerInternalStats.CursorStats cursor4 = admin.topics().getInternalStats(topicName).cursors.get(s4); + log.info("cursor4 readPosition: {}, markDeletedPosition: {}", cursor4.readPosition, cursor4.markDeletePosition); + PositionImpl p4 = parseReadPosition(cursor4); + assertEquals(p4.getLedgerId(), currentLedger.getId()); + assertEquals(p4.getEntryId(), 0); + reader4.close(); + + // Ledger id and entry id both are Long.MAX_VALUE. 
+ log.info("start test s5"); + String s5 = "s5"; + MessageIdImpl startMessageId5 = new MessageIdImpl(currentLedger.getId() + 1, Long.MAX_VALUE, -1); + Reader reader5 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s5) + .receiverQueueSize(0).startMessageId(startMessageId5).create(); + ManagedLedgerInternalStats.CursorStats cursor5 = admin.topics().getInternalStats(topicName).cursors.get(s5); + log.info("cursor5 readPosition: {}, markDeletedPosition: {}", cursor5.readPosition, cursor5.markDeletePosition); + PositionImpl p5 = parseReadPosition(cursor5); + assertEquals(p5.getLedgerId(), currentLedger.getId()); + assertEquals(p5.getEntryId(), 0); + reader5.close(); + + // Ledger id equals LAC, and entry id is "-1". + log.info("start test s6"); + String s6 = "s6"; + MessageIdImpl startMessageId6 = new MessageIdImpl(ledgers.get(ledgers.size() - 1), -1, -1); + Reader reader6 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s6) + .receiverQueueSize(0).startMessageId(startMessageId6).create(); + ManagedLedgerInternalStats.CursorStats cursor6 = admin.topics().getInternalStats(topicName).cursors.get(s6); + log.info("cursor6 readPosition: {}, markDeletedPosition: {}", cursor6.readPosition, cursor6.markDeletePosition); + PositionImpl p6 = parseReadPosition(cursor6); + assertEquals(p6.getLedgerId(), ledgers.get(ledgers.size() - 1)); + assertEquals(p6.getEntryId(), 0); + reader6.close(); + + // Larger than the latest ledger, and entry id is Long.MAX_VALUE. + log.info("start test s7"); + String s7 = "s7"; + MessageIdImpl startMessageId7 = new MessageIdImpl(ledgers.get(ledgers.size() - 1), Long.MAX_VALUE, -1); + Reader reader7 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s7) + .receiverQueueSize(0).startMessageId(startMessageId7).create(); + ManagedLedgerInternalStats.CursorStats cursor7 = admin.topics().getInternalStats(topicName).cursors.get(s7); + log.info("cursor7 readPosition: {}, markDeletedPosition: {}", cursor7.readPosition, cursor7.markDeletePosition); + PositionImpl p7 = parseReadPosition(cursor7); + assertEquals(p7.getLedgerId(), currentLedger.getId()); + assertEquals(p7.getEntryId(), 0); + reader7.close(); + + // A middle ledger id, and entry id is "-1". + log.info("start test s8"); + String s8 = "s8"; + MessageIdImpl startMessageId8 = new MessageIdImpl(ledgers.get(2), 0, -1); + Reader reader8 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s8) + .receiverQueueSize(0).startMessageId(startMessageId8).create(); + ManagedLedgerInternalStats.CursorStats cursor8 = admin.topics().getInternalStats(topicName).cursors.get(s8); + log.info("cursor8 readPosition: {}, markDeletedPosition: {}", cursor8.readPosition, cursor8.markDeletePosition); + PositionImpl p8 = parseReadPosition(cursor8); + assertEquals(p8.getLedgerId(), ledgers.get(2)); + assertEquals(p8.getEntryId(), 0); + reader8.close(); + + // Larger than the latest ledger, and entry id is Long.MAX_VALUE. 
+ log.info("start test s9"); + String s9 = "s9"; + MessageIdImpl startMessageId9 = new MessageIdImpl(ledgers.get(2), Long.MAX_VALUE, -1); + Reader reader9 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s9) + .receiverQueueSize(0).startMessageId(startMessageId9).create(); + ManagedLedgerInternalStats.CursorStats cursor9 = admin.topics().getInternalStats(topicName).cursors.get(s9); + log.info("cursor9 readPosition: {}, markDeletedPosition: {}", cursor9.readPosition, + cursor9.markDeletePosition); + PositionImpl p9 = parseReadPosition(cursor9); + assertEquals(p9.getLedgerId(), ledgers.get(3)); + assertEquals(p9.getEntryId(), 0); + reader9.close(); + + // Larger than the latest ledger, and entry id equals with the max entry id of this ledger. + log.info("start test s10"); + String s10 = "s10"; + MessageIdImpl startMessageId10 = new MessageIdImpl(ledgers.get(2), 0, -1); + Reader reader10 = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName(s10) + .receiverQueueSize(0).startMessageId(startMessageId10).create(); + ManagedLedgerInternalStats.CursorStats cursor10 = admin.topics().getInternalStats(topicName).cursors.get(s10); + log.info("cursor10 readPosition: {}, markDeletedPosition: {}", cursor10.readPosition, cursor10.markDeletePosition); + PositionImpl p10 = parseReadPosition(cursor10); + assertEquals(p10.getLedgerId(), ledgers.get(2)); + assertEquals(p10.getEntryId(), 0); + reader10.close(); + + // cleanup + admin.topics().delete(topicName, false); + } + + private PositionImpl parseReadPosition(ManagedLedgerInternalStats.CursorStats cursorStats) { + String[] ledgerIdAndEntryId = cursorStats.readPosition.split(":"); + return PositionImpl.get(Long.valueOf(ledgerIdAndEntryId[0]), Long.valueOf(ledgerIdAndEntryId[1])); + } + + @Test + public void testReaderInitAtDeletedPosition() throws Exception { + String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(topicName); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + producer.send("1"); + producer.send("2"); + producer.send("3"); + MessageIdImpl msgIdInDeletedLedger4 = (MessageIdImpl) producer.send("4"); + MessageIdImpl msgIdInDeletedLedger5 = (MessageIdImpl) producer.send("5"); + + // Trigger a trim ledgers task, and verify trim ledgers successful. + admin.topics().unload(topicName); + trimLedgers(topicName); + List ledgers = admin.topics().getInternalStats(topicName).ledgers; + assertEquals(ledgers.size(), 1); + assertNotEquals(ledgers.get(0).ledgerId, msgIdInDeletedLedger5.getLedgerId()); + + // Start a reader at a deleted ledger. + MessageIdImpl startMessageId = + new MessageIdImpl(msgIdInDeletedLedger4.getLedgerId(), msgIdInDeletedLedger4.getEntryId(), -1); + Reader reader = pulsarClient.newReader(Schema.STRING).topic(topicName).subscriptionName("s1") + .startMessageId(startMessageId).create(); + Message msg1 = reader.readNext(2, TimeUnit.SECONDS); + Assert.assertNull(msg1); + + // Verify backlog and markDeletePosition is correct. 
+ Awaitility.await().untilAsserted(() -> { + SubscriptionStats subscriptionStats = admin.topics() + .getStats(topicName, true, true, true).getSubscriptions().get("s1"); + log.info("backlog size: {}", subscriptionStats.getMsgBacklog()); + assertEquals(subscriptionStats.getMsgBacklog(), 0); + ManagedLedgerInternalStats.CursorStats cursorStats = + admin.topics().getInternalStats(topicName).cursors.get("s1"); + String[] ledgerIdAndEntryId = cursorStats.markDeletePosition.split(":"); + PositionImpl actMarkDeletedPos = + PositionImpl.get(Long.valueOf(ledgerIdAndEntryId[0]), Long.valueOf(ledgerIdAndEntryId[1])); + PositionImpl expectedMarkDeletedPos = + PositionImpl.get(msgIdInDeletedLedger5.getLedgerId(), msgIdInDeletedLedger5.getEntryId()); + log.info("Expected mark deleted position: {}", expectedMarkDeletedPos); + log.info("Actual mark deleted position: {}", cursorStats.markDeletePosition); + assertTrue(actMarkDeletedPos.compareTo(expectedMarkDeletedPos) >= 0); + }); + + // cleanup. + reader.close(); + producer.close(); + admin.topics().delete(topicName, false); + } + + @Test + public void testTrimLedgerIfNoDurableCursor() throws Exception { + final String nonDurableCursor = "non-durable-cursor"; + final String durableCursor = "durable-cursor"; + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(topicName); + Reader reader = pulsarClient.newReader(Schema.STRING).topic(topicName).receiverQueueSize(1) + .subscriptionName(nonDurableCursor).startMessageId(MessageIdImpl.earliest).create(); + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).topic(topicName).receiverQueueSize(1) + .subscriptionName(durableCursor).subscribe(); + consumer.close(); + + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + producer.send("1"); + producer.send("2"); + producer.send("3"); + producer.send("4"); + MessageIdImpl msgIdInDeletedLedger5 = (MessageIdImpl) producer.send("5"); + + Message msg1 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg1.getValue(), "1"); + Message msg2 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg2.getValue(), "2"); + Message msg3 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg3.getValue(), "3"); + + // Unsubscribe durable cursor. + // Trigger a trim ledgers task, and verify trim ledgers successful. + admin.topics().unload(topicName); + Thread.sleep(3 * 1000); + admin.topics().deleteSubscription(topicName, durableCursor); + // Trim ledgers after release durable cursor. + trimLedgers(topicName); + List ledgers = admin.topics().getInternalStats(topicName).ledgers; + assertEquals(ledgers.size(), 1); + assertNotEquals(ledgers.get(0).ledgerId, msgIdInDeletedLedger5.getLedgerId()); + + // Verify backlog and markDeletePosition is correct. 
+ Awaitility.await().untilAsserted(() -> { + SubscriptionStats subscriptionStats = admin.topics().getStats(topicName, true, true, true) + .getSubscriptions().get(nonDurableCursor); + log.info("backlog size: {}", subscriptionStats.getMsgBacklog()); + assertEquals(subscriptionStats.getMsgBacklog(), 0); + ManagedLedgerInternalStats.CursorStats cursorStats = + admin.topics().getInternalStats(topicName).cursors.get(nonDurableCursor); + String[] ledgerIdAndEntryId = cursorStats.markDeletePosition.split(":"); + PositionImpl actMarkDeletedPos = + PositionImpl.get(Long.valueOf(ledgerIdAndEntryId[0]), Long.valueOf(ledgerIdAndEntryId[1])); + PositionImpl expectedMarkDeletedPos = + PositionImpl.get(msgIdInDeletedLedger5.getLedgerId(), msgIdInDeletedLedger5.getEntryId()); + log.info("Expected mark deleted position: {}", expectedMarkDeletedPos); + log.info("Actual mark deleted position: {}", cursorStats.markDeletePosition); + Assert.assertTrue(actMarkDeletedPos.compareTo(expectedMarkDeletedPos) >= 0); + }); + + // Clear the incoming queue of the reader for the next test. + while (true) { + Message msg = reader.readNext(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + log.info("clear msg: {}", msg.getValue()); + } + + // The following checks are designed to verify that the APIs "getNumberOfEntries" and "consumedEntries" still work + // after the changes. See the code description added with the PR https://github.com/apache/pulsar/pull/10667. + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + ManagedCursorImpl cursor = (ManagedCursorImpl) ml.getCursors().get(nonDurableCursor); + + // Verify "getNumberOfEntries" if there are no entries to consume. + assertEquals(0, cursor.getNumberOfEntries()); + assertEquals(0, ml.getNumberOfEntries()); + + // Verify "getNumberOfEntries" if there is 1 entry to consume. + producer.send("6"); + producer.send("7"); + Awaitility.await().untilAsserted(() -> { + assertEquals(2, ml.getNumberOfEntries()); + // Since one message has already been pulled into the incoming queue of the reader, there is only one message + // waiting for the cursor to read. + assertEquals(1, cursor.getNumberOfEntries()); + }); + + // Verify "consumedEntries" is correct. + ManagedLedgerInternalStats.CursorStats cursorStats = + admin.topics().getInternalStats(topicName).cursors.get(nonDurableCursor); + // "messagesConsumedCounter" should be 0 after unloading the topic. + // Note: "topic_internal_stat.cursor.messagesConsumedCounter" means how many messages were acked on this + // cursor. The similar field "topic_stats.lastConsumedTimestamp" records the last time messages were sent to + // the consumer. + assertEquals(0, cursorStats.messagesConsumedCounter); + Message msg6 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg6.getValue(), "6"); + Message msg7 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg7.getValue(), "7"); + Awaitility.await().untilAsserted(() -> { + // "messagesConsumedCounter" should be 2 after consuming 2 messages. + ManagedLedgerInternalStats.CursorStats cStat = + admin.topics().getInternalStats(topicName).cursors.get(nonDurableCursor); + assertEquals(2, cStat.messagesConsumedCounter); + }); + + // cleanup.
+ reader.close(); + producer.close(); + admin.topics().delete(topicName, false); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java new file mode 100644 index 0000000000000..7a6189702dd8c --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.api; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.TopicPoliciesService; +import org.apache.pulsar.broker.service.TopicPolicyListener; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TopicPolicies; +import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-api") +public class OrphanPersistentTopicTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testNoOrphanTopicAfterCreateTimeout() throws Exception { + // Make the topic loading timeout faster. + int topicLoadTimeoutSeconds = 2; + long originalTopicLoadTimeoutSeconds = pulsar.getConfig().getTopicLoadTimeoutSeconds(); + pulsar.getConfig().setTopicLoadTimeoutSeconds(2); + + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + String mlPath = BrokerService.MANAGED_LEDGER_PATH_ZNODE + "/" + TopicName.get(tpName).getPersistenceNamingEncoding(); + + // Make topic load timeout 5 times. + AtomicInteger timeoutCounter = new AtomicInteger(); + for (int i = 0; i < 5; i++) { + mockZooKeeper.delay(topicLoadTimeoutSeconds * 2 * 1000, (op, path) -> { + if (mlPath.equals(path)) { + log.info("Topic load timeout: " + timeoutCounter.incrementAndGet()); + return true; + } + return false; + }); + } + + // Load topic. 
+ CompletableFuture> consumer = pulsarClient.newConsumer() + .topic(tpName) + .subscriptionName("my-sub") + .subscribeAsync(); + + // After create timeout 5 times, the topic will be created successful. + Awaitility.await().ignoreExceptions().atMost(40, TimeUnit.SECONDS).untilAsserted(() -> { + CompletableFuture> future = pulsar.getBrokerService().getTopic(tpName, false); + assertTrue(future.isDone()); + Optional optional = future.get(); + assertTrue(optional.isPresent()); + }); + + // Assert only one PersistentTopic was not closed. + TopicPoliciesService topicPoliciesService = pulsar.getTopicPoliciesService(); + Map>> listeners = + WhiteboxImpl.getInternalState(topicPoliciesService, "listeners"); + assertEquals(listeners.get(TopicName.get(tpName)).size(), 1); + + // cleanup. + consumer.join().close(); + admin.topics().delete(tpName, false); + pulsar.getConfig().setTopicLoadTimeoutSeconds(originalTopicLoadTimeoutSeconds); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java index ca58bddf13c23..f58c1fa26afc7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java @@ -31,11 +31,6 @@ import org.testng.annotations.BeforeMethod; public abstract class ProducerConsumerBase extends MockedPulsarServiceBaseTest { - protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; protected String methodName; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProxyProtocolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProxyProtocolTest.java index 7f632d5a76485..19009689dc8a1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProxyProtocolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProxyProtocolTest.java @@ -45,11 +45,11 @@ public void testSniProxyProtocol() throws Exception { String topicName = "persistent://my-property/use/my-ns/my-topic1"; ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(brokerServiceUrl) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) .proxyServiceUrl(proxyUrl, ProxyProtocol.SNI).operationTimeout(1000, TimeUnit.MILLISECONDS); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); clientBuilder.authentication(AuthenticationTls.class.getName(), authParams); @Cleanup @@ -68,11 +68,11 @@ public void testSniProxyProtocolWithInvalidProxyUrl() throws Exception { String topicName = "persistent://my-property/use/my-ns/my-topic1"; ClientBuilder clientBuilder = 
PulsarClient.builder().serviceUrl(brokerServiceUrl) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) .proxyServiceUrl(proxyUrl, ProxyProtocol.SNI).operationTimeout(1000, TimeUnit.MILLISECONDS); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); clientBuilder.authentication(AuthenticationTls.class.getName(), authParams); @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java new file mode 100644 index 0000000000000..728e556f0224a --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.api; + +import static org.apache.pulsar.client.util.RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-api") +public class SimpleProducerConsumerDisallowAutoCreateTopicTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + conf.setAllowAutoTopicCreation(false); + } + + @Test + public void testClearErrorIfRetryTopicNotExists() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp_"); + final String subName = "sub"; + final String retryTopicName = topicName + "-" + subName + RETRY_GROUP_TOPIC_SUFFIX; + admin.topics().createNonPartitionedTopic(topicName); + Consumer consumer = null; + try { + consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName(subName) + .enableRetry(true) + .subscribe(); + fail(""); + } catch (Exception ex) { + log.info("got an expected error", ex); + assertTrue(ex.getMessage().contains("Not found:")); + assertTrue(ex.getMessage().contains(retryTopicName)); + } finally { + // cleanup. + if (consumer != null) { + consumer.close(); + } + admin.topics().delete(topicName); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java index 1bc437195d96d..0c0e61fe33f88 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java @@ -39,6 +39,8 @@ import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; import io.netty.util.Timeout; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -84,7 +86,9 @@ import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.PulsarVersion; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.schema.GenericRecord; @@ -3136,7 +3140,7 @@ public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMe pulsarClient.newProducer().topic("persistent://my-property/my-ns/myenc-topic1") .addEncryptionKey("client-non-existant-rsa.pem").cryptoKeyReader(new EncKeyReader()) .create(); - Assert.fail("Producer creation should not suceed if failing to read key"); + Assert.fail("Producer creation should not succeed if failing to read key"); } catch (Exception e) { // ok } @@ -4109,6 +4113,35 @@ public void testGetStats() throws Exception { consumer.close(); 
producer.close(); } + @Test(timeOut = 100000) + public void testMessageListenerGetStats() throws Exception { + final String topicName = "persistent://my-property/my-ns/testGetStats" + UUID.randomUUID(); + final String subName = "my-sub"; + final int receiveQueueSize = 100; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.STRING) + .messageListener((MessageListener) (consumer1, msg) -> { + try { + TimeUnit.SECONDS.sleep(10); + } catch (InterruptedException igr) { + } + }) + .topic(topicName).receiverQueueSize(receiveQueueSize).subscriptionName(subName).subscribe(); + Assert.assertNull(consumer.getStats().getMsgNumInSubReceiverQueue()); + Assert.assertEquals(consumer.getStats().getMsgNumInReceiverQueue().intValue(), 0); + + for (int i = 0; i < receiveQueueSize; i++) { + producer.sendAsync("msg" + i); + } + // Give some time to consume. + Awaitility.await() + .untilAsserted(() -> Assert.assertEquals(consumer.getStats().getMsgNumInReceiverQueue().intValue(), receiveQueueSize)); + consumer.close(); + producer.close(); + } @Test(timeOut = 100000) public void testGetStatsForPartitionedTopic() throws Exception { @@ -4613,4 +4646,50 @@ public void testClientVersion() throws Exception { producer2.close(); client.close(); } + + @Test + public void testConsumeWhenDeliveryFailedByIOException() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "subscription1"; + final int messagesCount = 100; + final int receiverQueueSize = 1; + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).enableBatching(false).create(); + ConsumerImpl consumer = (ConsumerImpl) pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionName(subscriptionName).receiverQueueSize(receiverQueueSize).subscribe(); + for (int i = 0; i < messagesCount; i++) { + producer.send(i + ""); + } + // Wait until the incoming queue of the consumer is full. + Awaitility.await().untilAsserted(() -> { + assertEquals(consumer.getIncomingMessageSize(), receiverQueueSize); + }); + + // Mock an IO error for sending messages out. + ServerCnx serverCnx = (ServerCnx) pulsar.getBrokerService().getTopic(topic, false).join().get() + .getSubscription(subscriptionName).getDispatcher().getConsumers().iterator().next().cnx(); + serverCnx.ctx().channel().pipeline().addFirst(new ChannelDuplexHandler() { + + @Override + public void flush(ChannelHandlerContext ctx) throws Exception { + throw new IOException("Mocked error"); + } + }); + + // Verify all messages will be consumed.
+ Set receivedMessages = new HashSet<>(); + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg != null) { + receivedMessages.add(msg.getValue()); + consumer.acknowledge(msg); + } else { + break; + } + } + Assert.assertEquals(receivedMessages.size(), messagesCount); + + producer.close(); + consumer.close(); + admin.topics().delete(topic, false); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsHostVerificationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsHostVerificationTest.java index 95a78d7ffcef6..fff61c5c8c940 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsHostVerificationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsHostVerificationTest.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.impl.auth.AuthenticationTls; @@ -30,21 +31,38 @@ @Test(groups = "broker-api") public class TlsHostVerificationTest extends TlsProducerConsumerBase { + @Override + @Test(enabled = false) + protected void customizeMainPulsarTestContextBuilder(PulsarTestContext.Builder builder) { + builder.configCustomizer(config -> { + // Advertise a hostname that routes but is not on the certificate + // Note that if you are on a Mac, you'll need to run the following to make loopback work for 127.0.0.2 + // $ sudo ifconfig lo0 alias 127.0.0.2 up + config.setAdvertisedAddress("127.0.0.2"); + }); + } + @Test public void testTlsHostVerificationAdminClient() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); - String websocketTlsAddress = pulsar.getWebServiceAddressTls(); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); + Assert.assertTrue(pulsar.getWebServiceAddressTls().startsWith("https://127.0.0.2:"), + "Test relies on this address"); PulsarAdmin adminClientTls = PulsarAdmin.builder() - .serviceHttpUrl(websocketTlsAddress.replace("localhost", "127.0.0.1")) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false) + .serviceHttpUrl(pulsar.getWebServiceAddressTls()) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), authParams).enableTlsHostnameVerification(true) + .requestTimeout(1, java.util.concurrent.TimeUnit.SECONDS) .build(); try { adminClientTls.tenants().getTenants(); Assert.fail("Admin call should be failed due to hostnameVerification enabled"); + } catch (PulsarAdminException.TimeoutException e) { + // The test was previously able to fail here, but that is not the right way for the test to pass. 
+ // If you hit this error and are running on OSX, you may need to run "sudo ifconfig lo0 alias 127.0.0.2 up" + Assert.fail("Admin call should not timeout, it should fail due to SSL error"); } catch (PulsarAdminException e) { // Ok } @@ -53,11 +71,13 @@ public void testTlsHostVerificationAdminClient() throws Exception { @Test public void testTlsHostVerificationDisabledAdminClient() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); + Assert.assertTrue(pulsar.getWebServiceAddressTls().startsWith("https://127.0.0.2:"), + "Test relies on this address"); PulsarAdmin adminClient = PulsarAdmin.builder() .serviceHttpUrl(pulsar.getWebServiceAddressTls()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), authParams).enableTlsHostnameVerification(false) .build(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerBase.java index 6a2109836a2c9..39bab20d97df5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerBase.java @@ -38,11 +38,6 @@ @Test(groups = "broker-api") public abstract class TlsProducerConsumerBase extends ProducerConsumerBase { - protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; private final String clusterName = "use"; @BeforeMethod @@ -64,9 +59,9 @@ protected void cleanup() throws Exception { protected void internalSetUpForBroker() { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setClusterName(clusterName); conf.setTlsRequireTrustedClientCertOnConnect(true); Set tlsProtocols = Sets.newConcurrentHashSet(); @@ -81,12 +76,12 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl) pulsarClient.close(); } ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(lookupUrl) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false) .operationTimeout(1000, TimeUnit.MILLISECONDS); if (addCertificates) { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - 
authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); clientBuilder.authentication(AuthenticationTls.class.getName(), authParams); } replacePulsarClient(clientBuilder); @@ -94,15 +89,15 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl) protected void internalSetUpForNamespace() throws Exception { Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); if (admin != null) { admin.close(); } admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), authParams).build()); admin.clusters().createCluster(clusterName, ClusterData.builder() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerTest.java index 0563fc3b9da37..879289eb65dc8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsProducerConsumerTest.java @@ -146,9 +146,9 @@ public void testTlsCertsFromDynamicStream() throws Exception { .operationTimeout(1000, TimeUnit.MILLISECONDS); AtomicInteger index = new AtomicInteger(0); - ByteArrayInputStream certStream = createByteInputStream(TLS_CLIENT_CERT_FILE_PATH); - ByteArrayInputStream keyStream = createByteInputStream(TLS_CLIENT_KEY_FILE_PATH); - ByteArrayInputStream trustStoreStream = createByteInputStream(TLS_TRUST_CERT_FILE_PATH); + ByteArrayInputStream certStream = createByteInputStream(getTlsFileForClient("admin.cert")); + ByteArrayInputStream keyStream = createByteInputStream(getTlsFileForClient("admin.key-pk8")); + ByteArrayInputStream trustStoreStream = createByteInputStream(CA_CERT_FILE_PATH); Supplier certProvider = () -> getStream(index, certStream); Supplier keyProvider = () -> getStream(index, keyStream); @@ -203,9 +203,9 @@ public void testTlsCertsFromDynamicStreamExpiredAndRenewCert() throws Exception AtomicInteger certIndex = new AtomicInteger(1); AtomicInteger keyIndex = new AtomicInteger(0); AtomicInteger trustStoreIndex = new AtomicInteger(1); - ByteArrayInputStream certStream = createByteInputStream(TLS_CLIENT_CERT_FILE_PATH); - ByteArrayInputStream keyStream = createByteInputStream(TLS_CLIENT_KEY_FILE_PATH); - ByteArrayInputStream trustStoreStream = createByteInputStream(TLS_TRUST_CERT_FILE_PATH); + ByteArrayInputStream certStream = createByteInputStream(getTlsFileForClient("admin.cert")); + ByteArrayInputStream keyStream = createByteInputStream(getTlsFileForClient("admin.key-pk8")); + ByteArrayInputStream trustStoreStream = createByteInputStream(CA_CERT_FILE_PATH); Supplier certProvider = () -> getStream(certIndex, certStream, keyStream/* invalid cert file */); Supplier keyProvider = () -> getStream(keyIndex, keyStream); @@ -252,7 +252,8 @@ private ByteArrayInputStream getStream(AtomicInteger index, ByteArrayInputStream return streams[index.intValue()]; } - private final Authentication tlsAuth = new 
AuthenticationTls(TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH); + private final Authentication tlsAuth = + new AuthenticationTls(getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8")); @DataProvider public Object[] tlsTransport() { @@ -276,13 +277,14 @@ public void testTlsTransport(Supplier url, Authentication auth) throws E internalSetUpForNamespace(); ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(url.get()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .allowTlsInsecureConnection(false) .enableTlsHostnameVerification(false) .authentication(auth); if (auth == null) { - clientBuilder.tlsKeyFilePath(TLS_CLIENT_KEY_FILE_PATH).tlsCertificateFilePath(TLS_CLIENT_CERT_FILE_PATH); + clientBuilder.tlsKeyFilePath(getTlsFileForClient("admin.key-pk8")) + .tlsCertificateFilePath(getTlsFileForClient("admin.cert")); } @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsSniTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsSniTest.java index fd722e52e5f1f..173fa8acb0fdd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsSniTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TlsSniTest.java @@ -50,12 +50,12 @@ public void testIpAddressInBrokerServiceUrl() throws Exception { brokerServiceUrlTls.getPort()); ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(brokerServiceIpAddressUrl) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false) .enableTlsHostnameVerification(false) .operationTimeout(1000, TimeUnit.MILLISECONDS); Map authParams = new HashMap<>(); - authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); - authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); clientBuilder.authentication(AuthenticationTls.class.getName(), authParams); @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java index 87f12e6acdcb2..4d5e7deaf7d99 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java @@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.slf4j.Logger; @@ -92,6 +93,8 @@ protected void setup() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderToken.class.getName()); conf.setAuthenticationProviders(providers); + conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters("token:" + ADMIN_TOKEN); conf.setClusterName("test"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java index e955a9ae7062f..4fc0d315d2253 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenExpirationProduceConsumerTest.java @@ -101,9 +101,9 @@ public String getExpireToken(String role, Date date) { protected void internalSetUpForBroker() { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setClusterName(configClusterName); conf.setAuthenticationRefreshCheckSeconds(1); conf.setTlsRequireTrustedClientCertOnConnect(false); @@ -121,7 +121,7 @@ protected void internalSetUpForBroker() { private PulsarClient getClient(String token) throws Exception { ClientBuilder clientBuilder = PulsarClient.builder() .serviceUrl(pulsar.getBrokerServiceUrlTls()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .enableTls(true) .allowTlsInsecureConnection(false) .enableTlsHostnameVerification(true) @@ -132,7 +132,7 @@ private PulsarClient getClient(String token) throws Exception { private PulsarAdmin getAdmin(String token) throws Exception { PulsarAdminBuilder clientBuilder = PulsarAdmin.builder().serviceHttpUrl(pulsar.getWebServiceAddressTls()) - .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .allowTlsInsecureConnection(false) .authentication(AuthenticationToken.class.getName(),"token:" +token) .enableTlsHostnameVerification(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java index 22834b2e0c9c1..ba43ee6d6a2dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java @@ -27,10 +27,13 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.TimeUnit; +import org.apache.pulsar.broker.auth.MockOIDCIdentityProvider; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.impl.ProducerImpl; @@ -38,10 +41,13 @@ import org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -55,25 
+61,28 @@ public class TokenOauth2AuthenticatedProducerConsumerTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(TokenOauth2AuthenticatedProducerConsumerTest.class); - // public key in oauth2 server to verify the client passed in token. get from https://jwt.io/ - private final String TOKEN_TEST_PUBLIC_KEY = "data:;base64,MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2tZd/4gJda3U2Pc3tpgRAN7JPGWx/Gn17v/0IiZlNNRbP/Mmf0Vc6G1qsnaRaWNWOR+t6/a6ekFHJMikQ1N2X6yfz4UjMc8/G2FDPRmWjA+GURzARjVhxc/BBEYGoD0Kwvbq/u9CZm2QjlKrYaLfg3AeB09j0btNrDJ8rBsNzU6AuzChRvXj9IdcE/A/4N/UQ+S9cJ4UXP6NJbToLwajQ5km+CnxdGE6nfB7LWHvOFHjn9C2Rb9e37CFlmeKmIVFkagFM0gbmGOb6bnGI8Bp/VNGV0APef4YaBvBTqwoZ1Z4aDHy5eRxXfAMdtBkBupmBXqL6bpd15XRYUbu/7ck9QIDAQAB"; - - private final String ADMIN_ROLE = "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x@clients"; + private MockOIDCIdentityProvider server; // Credentials File, which contains "client_id" and "client_secret" private final String CREDENTIALS_FILE = "./src/test/resources/authentication/token/credentials_file.json"; - private final String ISSUER_URL = "https://dev-kt-aa9ne.us.auth0.com"; - private final String AUDIENCE = "https://dev-kt-aa9ne.us.auth0.com/api/v2/"; + private final String audience = "my-pulsar-cluster"; + + @BeforeClass(alwaysRun = true) + protected void setupClass() { + String clientSecret = "super-secret-client-secret"; + server = new MockOIDCIdentityProvider(clientSecret, audience, 3000); + } @BeforeMethod(alwaysRun = true) @Override protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); - conf.setAuthenticationRefreshCheckSeconds(5); + conf.setAuthenticationRefreshCheckSeconds(1); Set superUserRoles = new HashSet<>(); - superUserRoles.add(ADMIN_ROLE); + // Matches the role in the credentials file + superUserRoles.add("my-admin-role"); conf.setSuperUserRoles(superUserRoles); Set providers = new HashSet<>(); @@ -81,17 +90,18 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setBrokerClientAuthenticationPlugin(AuthenticationOAuth2.class.getName()); - conf.setBrokerClientAuthenticationParameters("{\n" - + " \"privateKey\": \"" + CREDENTIALS_FILE + "\",\n" - + " \"issuerUrl\": \"" + ISSUER_URL + "\",\n" - + " \"audience\": \"" + AUDIENCE + "\",\n" - + "}\n"); + final Map oauth2Param = new HashMap<>(); + oauth2Param.put("privateKey", CREDENTIALS_FILE); + oauth2Param.put("issuerUrl", server.getIssuer()); + oauth2Param.put("audience", audience); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(oauth2Param)); conf.setClusterName("test"); // Set provider domain name Properties properties = new Properties(); - properties.setProperty("tokenPublicKey", TOKEN_TEST_PUBLIC_KEY); + properties.setProperty("tokenPublicKey", "data:;base64," + server.getBase64EncodedPublicKey()); conf.setProperties(properties); super.init(); @@ -104,9 +114,9 @@ protected final void clientSetup() throws Exception { // AuthenticationOAuth2 Authentication authentication = AuthenticationFactoryOAuth2.clientCredentials( - new URL(ISSUER_URL), + new URL(server.getIssuer()), path.toUri().toURL(), // key file path - AUDIENCE + audience ); admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrl.toString()) @@ -124,6 +134,11 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @AfterClass(alwaysRun = true) + protected void cleanupAfterClass() { + server.stop(); + } + @DataProvider(name =
"batch") public Object[][] codecProvider() { return new Object[][] { { 0 }, { 1000 } }; @@ -210,12 +225,11 @@ public void testOAuth2TokenRefreshedWithoutReconnect() throws Exception { String accessTokenOld = producerImpl.getClientCnx().getAuthenticationDataProvider().getCommandData(); long lastDisconnectTime = producer.getLastDisconnectedTimestamp(); - // the token expire duration is 10 seconds, so we need to wait for the authenticationData refreshed + // the token expire duration is 3 seconds, so we need to wait for the authenticationData refreshed Awaitility.await() - .atLeast(10, TimeUnit.SECONDS) - .atMost(20, TimeUnit.SECONDS) + .atMost(10, TimeUnit.SECONDS) .with() - .pollInterval(Duration.ofSeconds(1)) + .pollInterval(Duration.ofMillis(250)) .untilAsserted(() -> { String accessTokenNew = producerImpl.getClientCnx().getAuthenticationDataProvider().getCommandData(); assertNotEquals(accessTokenNew, accessTokenOld); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TopicNameForInfiniteHttpCallGetSubscriptionsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TopicNameForInfiniteHttpCallGetSubscriptionsTest.java new file mode 100644 index 0000000000000..96f57a59bda13 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TopicNameForInfiniteHttpCallGetSubscriptionsTest.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.api; + +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TopicType; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class TopicNameForInfiniteHttpCallGetSubscriptionsTest extends ProducerConsumerBase { + + @BeforeMethod(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + conf.setAllowAutoTopicCreationType(TopicType.PARTITIONED); + conf.setDefaultNumPartitions(1); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testInfiniteHttpCallGetOrCreateSubscriptions() throws Exception { + final String randomStr = UUID.randomUUID().toString().replaceAll("-", ""); + final String partitionedTopicName = "persistent://my-property/my-ns/tp1_" + randomStr; + final String topic_p0 = partitionedTopicName + TopicName.PARTITIONED_TOPIC_SUFFIX + "0"; + final String subscriptionName = "sub1"; + final String topicDLQ = topic_p0 + "-" + subscriptionName + "-DLQ"; + + admin.topics().createPartitionedTopic(partitionedTopicName, 2); + + // Do test. + ProducerAndConsumerEntry pcEntry = triggerDLQCreated(topic_p0, topicDLQ, subscriptionName); + admin.topics().getSubscriptions(topicDLQ); + admin.topics().createSubscription(topicDLQ, "s1", MessageId.earliest); + + // cleanup. + pcEntry.consumer.close(); + pcEntry.producer.close(); + admin.topics().deletePartitionedTopic(partitionedTopicName); + } + + @Test + public void testInfiniteHttpCallGetOrCreateSubscriptions2() throws Exception { + final String randomStr = UUID.randomUUID().toString().replaceAll("-", ""); + final String topicName = "persistent://my-property/my-ns/tp1_" + randomStr + "-partition-0-abc"; + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .create(); + + // Do test. + admin.topics().getSubscriptions(topicName); + admin.topics().createSubscription(topicName, "s1", MessageId.earliest); + + // cleanup. + producer.close(); + } + + @Test + public void testInfiniteHttpCallGetOrCreateSubscriptions3() throws Exception { + final String randomStr = UUID.randomUUID().toString().replaceAll("-", ""); + final String topicName = "persistent://my-property/my-ns/tp1_" + randomStr + "-partition-0"; + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .create(); + + // Do test. + admin.topics().getSubscriptions(topicName); + admin.topics().createSubscription(topicName, "s1", MessageId.earliest); + + // cleanup. 
+ producer.close(); + } + + @AllArgsConstructor + private static class ProducerAndConsumerEntry { + private Producer producer; + private Consumer consumer; + } + + private ProducerAndConsumerEntry triggerDLQCreated(String topicName, String DLQName, String subscriptionName) throws Exception { + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .enableRetry(true) + .deadLetterPolicy(DeadLetterPolicy.builder().deadLetterTopic(DLQName).maxRedeliverCount(2).build()) + .receiverQueueSize(100) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .create(); + // send messages. + for (int i = 0; i < 5; i++) { + producer.newMessage() + .value("value-" + i) + .sendAsync(); + } + producer.flush(); + // trigger the DLQ created. + for (int i = 0; i < 20; i++) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + if (msg != null) { + consumer.reconsumeLater(msg, 1, TimeUnit.SECONDS); + } else { + break; + } + } + + return new ProducerAndConsumerEntry(producer, consumer); + } +} \ No newline at end of file diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java index 9f94d894632b8..e126d963a88f5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java @@ -2296,7 +2296,7 @@ public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMe .addEncryptionKey("client-non-existant-rsa.pem") .cryptoKeyReader(new EncKeyReader()) .create(); - Assert.fail("Producer creation should not suceed if failing to read key"); + Assert.fail("Producer creation should not succeed if failing to read key"); } catch (Exception e) { // ok } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionHandlerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionHandlerTest.java new file mode 100644 index 0000000000000..f29d62db5f480 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionHandlerTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.common.util.FutureUtil; +import org.awaitility.Awaitility; +import org.awaitility.core.ConditionTimeoutException; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-impl") +public class ConnectionHandlerTest extends ProducerConsumerBase { + + private static final Backoff BACKOFF = new BackoffBuilder().setInitialTime(1, TimeUnit.MILLISECONDS) + .setMandatoryStop(1, TimeUnit.SECONDS) + .setMax(3, TimeUnit.SECONDS).create(); + private final ExecutorService executor = Executors.newFixedThreadPool(4); + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + executor.shutdown(); + } + + @Test(timeOut = 30000) + public void testSynchronousGrabCnx() { + for (int i = 0; i < 10; i++) { + final CompletableFuture future = new CompletableFuture<>(); + final int index = i; + final ConnectionHandler handler = new ConnectionHandler( + new MockedHandlerState((PulsarClientImpl) pulsarClient, "my-topic"), BACKOFF, + cnx -> { + future.complete(index); + return CompletableFuture.completedFuture(null); + }); + handler.grabCnx(); + Assert.assertEquals(future.join(), i); + } + } + + @Test + public void testConcurrentGrabCnx() { + final AtomicInteger cnt = new AtomicInteger(0); + final ConnectionHandler handler = new ConnectionHandler( + new MockedHandlerState((PulsarClientImpl) pulsarClient, "my-topic"), BACKOFF, + cnx -> { + cnt.incrementAndGet(); + return CompletableFuture.completedFuture(null); + }); + final int numGrab = 10; + for (int i = 0; i < numGrab; i++) { + handler.grabCnx(); + } + Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> cnt.get() > 0); + Assert.assertThrows(ConditionTimeoutException.class, + () -> Awaitility.await().atMost(Duration.ofMillis(500)).until(() -> cnt.get() == numGrab)); + Assert.assertEquals(cnt.get(), 1); + } + + @Test + public void testDuringConnectInvokeCount() throws IllegalAccessException { + // 1. connectionOpened completes with null + final AtomicBoolean duringConnect = spy(new AtomicBoolean()); + final ConnectionHandler handler1 = new ConnectionHandler( + new MockedHandlerState((PulsarClientImpl) pulsarClient, "my-topic"), BACKOFF, + cnx -> CompletableFuture.completedFuture(null)); + FieldUtils.writeField(handler1, "duringConnect", duringConnect, true); + handler1.grabCnx(); + Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> !duringConnect.get()); + verify(duringConnect, times(1)).compareAndSet(false, true); + verify(duringConnect, times(1)).set(false); + + // 2. 
connectionFailed is called + final ConnectionHandler handler2 = new ConnectionHandler( + new MockedHandlerState((PulsarClientImpl) pulsarClient, null), new MockedBackoff(), + cnx -> CompletableFuture.completedFuture(null)); + FieldUtils.writeField(handler2, "duringConnect", duringConnect, true); + handler2.grabCnx(); + Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> !duringConnect.get()); + verify(duringConnect, times(2)).compareAndSet(false, true); + verify(duringConnect, times(2)).set(false); + + // 3. connectionOpened completes exceptionally + final ConnectionHandler handler3 = new ConnectionHandler( + new MockedHandlerState((PulsarClientImpl) pulsarClient, "my-topic"), new MockedBackoff(), + cnx -> FutureUtil.failedFuture(new RuntimeException("fail"))); + FieldUtils.writeField(handler3, "duringConnect", duringConnect, true); + handler3.grabCnx(); + Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> !duringConnect.get()); + verify(duringConnect, times(3)).compareAndSet(false, true); + verify(duringConnect, times(3)).set(false); + } + + private static class MockedHandlerState extends HandlerState { + + public MockedHandlerState(PulsarClientImpl client, String topic) { + super(client, topic); + } + + @Override + String getHandlerName() { + return "mocked"; + } + } + + private static class MockedBackoff extends Backoff { + + // Set a large backoff so that reconnection won't happen in tests + public MockedBackoff() { + super(1, TimeUnit.HOURS, 2, TimeUnit.HOURS, 1, TimeUnit.HOURS); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java index fb564bd5083c1..e7613879d5152 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java @@ -31,9 +31,13 @@ import java.util.function.Supplier; import java.util.stream.IntStream; import io.netty.util.concurrent.Promise; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.common.api.proto.CommandCloseProducer; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; @@ -80,6 +84,36 @@ public void testSingleIpAddress() throws Exception { eventLoop.shutdownGracefully(); } + @Test + public void testSelectConnectionForSameProducer() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://sample/standalone/ns/tp_"); + admin.topics().createNonPartitionedTopic(topicName); + final CommandCloseProducer commandCloseProducer = new CommandCloseProducer(); + // 10 connection per broker. + final PulsarClient clientWith10ConPerBroker = PulsarClient.builder().connectionsPerBroker(10) + .serviceUrl(lookupUrl.toString()).build(); + ProducerImpl producer = (ProducerImpl) clientWith10ConPerBroker.newProducer().topic(topicName).create(); + commandCloseProducer.setProducerId(producer.producerId); + // An error will be reported when the Producer reconnects using a different connection. + // If no error is reported, the same connection was used when reconnecting. 
+ for (int i = 0; i < 20; i++) { + // Trigger reconnect + ClientCnx cnx = producer.getClientCnx(); + if (cnx != null) { + cnx.handleCloseProducer(commandCloseProducer); + Awaitility.await().untilAsserted(() -> + Assert.assertEquals(producer.getState().toString(), HandlerState.State.Ready.toString(), + "The producer uses a different connection when reconnecting") + ); + } + } + + // cleanup. + producer.close(); + clientWith10ConPerBroker.close(); + admin.topics().delete(topicName); + } + @Test public void testDoubleIpAddress() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); @@ -200,14 +234,16 @@ protected void doResolveAll(SocketAddress socketAddress, Promise promise) throws ClientCnx cnx = pool.getConnection( InetSocketAddress.createUnresolved("proxy", 9999), - InetSocketAddress.createUnresolved("proxy", 9999)).get(); + InetSocketAddress.createUnresolved("proxy", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "proxy"); Assert.assertNull(cnx.proxyToTargetBrokerAddress); cnx.close(); cnx = pool.getConnection( InetSocketAddress.createUnresolved("broker", 9999), - InetSocketAddress.createUnresolved("proxy", 9999)).get(); + InetSocketAddress.createUnresolved("proxy", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "proxy"); Assert.assertEquals(cnx.proxyToTargetBrokerAddress, "broker:9999"); cnx.close(); @@ -215,7 +251,8 @@ protected void doResolveAll(SocketAddress socketAddress, Promise promise) throws cnx = pool.getConnection( InetSocketAddress.createUnresolved("broker", 9999), - InetSocketAddress.createUnresolved("broker", 9999)).get(); + InetSocketAddress.createUnresolved("broker", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "broker"); Assert.assertNull(cnx.proxyToTargetBrokerAddress); cnx.close(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/HierarchyTopicAutoCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/HierarchyTopicAutoCreationTest.java new file mode 100644 index 0000000000000..8c93b293c41a4 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/HierarchyTopicAutoCreationTest.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import lombok.Cleanup; +import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import java.util.List; +import java.util.UUID; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; +import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.metadata.api.MetadataCache; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker-impl") +@Slf4j +public class HierarchyTopicAutoCreationTest extends ProducerConsumerBase { + + @Override + @BeforeMethod + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @Override + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test(invocationCount = 3) + @SneakyThrows + public void testPartitionedTopicAutoCreation() { + // Create namespace + final String namespace = "public/testPartitionedTopicAutoCreation"; + admin.namespaces().createNamespace(namespace); + // Set policies + final AutoTopicCreationOverride expectedPolicies = AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType("partitioned") + .defaultNumPartitions(1) + .build(); + admin.namespaces().setAutoTopicCreation(namespace, expectedPolicies); + // Double-check the policies + final AutoTopicCreationOverride nsAutoTopicCreationOverride = admin.namespaces() + .getAutoTopicCreation(namespace); + Assert.assertEquals(nsAutoTopicCreationOverride, expectedPolicies); + // Background invalidate cache + final MetadataCache nsCache = pulsar.getPulsarResources().getNamespaceResources().getCache(); + final Thread t1 = new Thread(() -> { + while (true) { + nsCache.invalidate("/admin/policies/" + namespace); + } + }); + t1.start(); + + // trigger auto-creation + final String topicName = "persistent://" + namespace + "/test-" + UUID.randomUUID(); + @Cleanup final Producer producer = pulsarClient.newProducer() + .topic(topicName) + .create(); + final List topics = admin.topics().getList(namespace); + Assert.assertEquals(topics.size(), 1); // expect only one topic + Assert.assertEquals(topics.get(0), + TopicName.get(topicName).getPartition(0).toString()); // expect partitioned topic + + // double-check policies + final AutoTopicCreationOverride actualPolicies2 = admin.namespaces().getAutoTopicCreation(namespace); + Assert.assertEquals(actualPolicies2, expectedPolicies); + + t1.interrupt(); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java index 8e508b6cf2068..77405e142013a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java @@ -32,6 +32,7 @@ import io.jsonwebtoken.SignatureAlgorithm; import lombok.Cleanup; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; @@ -49,6 +50,7 @@ import 
org.apache.pulsar.client.impl.auth.AuthenticationToken; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -83,6 +85,7 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @SneakyThrows protected void internalSetUpForBroker() { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); @@ -114,6 +117,25 @@ protected void internalSetUpForBroker() { conf.setAuthenticationProviders(providers); conf.setNumExecutorThreadPoolSize(5); + Set tlsProtocols = Sets.newConcurrentHashSet(); + tlsProtocols.add("TLSv1.3"); + tlsProtocols.add("TLSv1.2"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationKeyStoreTls.class.getName()); + Map authParams = new HashMap<>(); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_TYPE, KEYSTORE_TYPE); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_PATH, CLIENT_KEYSTORE_FILE_PATH); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_PW, CLIENT_KEYSTORE_PW); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory.getMapper() + .getObjectMapper().writeValueAsString(authParams)); + conf.setBrokerClientTlsEnabled(true); + conf.setBrokerClientTlsEnabledWithKeyStore(true); + conf.setBrokerClientTlsTrustStore(BROKER_TRUSTSTORE_FILE_PATH); + conf.setBrokerClientTlsTrustStorePassword(BROKER_TRUSTSTORE_PW); + conf.setBrokerClientTlsKeyStore(CLIENT_KEYSTORE_FILE_PATH); + conf.setBrokerClientTlsKeyStoreType(KEYSTORE_TYPE); + conf.setBrokerClientTlsKeyStorePassword(CLIENT_KEYSTORE_PW); + conf.setBrokerClientTlsProtocols(tlsProtocols); + } protected void internalSetUpForClient(boolean addCertificates, String lookupUrl) throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java new file mode 100644 index 0000000000000..5e590414132a5 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import static org.apache.pulsar.client.impl.MessageChunkingSharedTest.sendChunk; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-impl") +public class MessageChunkingDeduplicationTest extends ProducerConsumerBase { + + @BeforeClass + @Override + protected void setup() throws Exception { + this.conf.setBrokerDeduplicationEnabled(true); + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testSendChunkMessageWithSameSequenceID() throws Exception { + String topicName = "persistent://my-property/my-ns/testSendChunkMessageWithSameSequenceID"; + String producerName = "test-producer"; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .subscriptionName("test-sub") + .topic(topicName) + .subscribe(); + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(topicName) + .enableChunking(true) + .enableBatching(false) + .create(); + int messageSize = 6000; // payload size in KB + String message = "a".repeat(messageSize * 1000); + producer.newMessage().value(message).sequenceId(10).send(); + Message msg = consumer.receive(10, TimeUnit.SECONDS); + assertNotNull(msg); + assertTrue(msg.getMessageId() instanceof ChunkMessageIdImpl); + assertEquals(msg.getValue(), message); + producer.newMessage().value(message).sequenceId(10).send(); + msg = consumer.receive(3, TimeUnit.SECONDS); + assertNull(msg); + } + + @Test + public void testDeduplicateChunksInSingleChunkMessages() throws Exception { + String topicName = "persistent://my-property/my-ns/testDeduplicateChunksInSingleChunkMessage"; + String producerName = "test-producer"; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .subscriptionName("test-sub") + .topic(topicName) + .subscribe(); + final PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().orElse(null); + assertNotNull(persistentTopic); + sendChunk(persistentTopic, producerName, 1, 0, 2); + sendChunk(persistentTopic, producerName, 1, 1, 2); + sendChunk(persistentTopic, producerName, 1, 1, 2); + + Message message = consumer.receive(15, TimeUnit.SECONDS); + assertEquals(message.getData().length, 2); + + sendChunk(persistentTopic, producerName, 2, 0, 3); + sendChunk(persistentTopic, producerName, 2, 1, 3); + sendChunk(persistentTopic, producerName, 2, 1, 3); + sendChunk(persistentTopic, producerName, 2, 2, 3); + message = consumer.receive(20, TimeUnit.SECONDS); + assertEquals(message.getData().length, 3); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java index 163c42d835b35..3d24d3746d66a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -34,6 +35,7 @@ import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.ConsumerBuilder; @@ -45,7 +47,6 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; -import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; import org.awaitility.Awaitility; @@ -217,7 +218,7 @@ private static void sendNonChunk(final PersistentTopic persistentTopic, sendChunk(persistentTopic, producerName, sequenceId, null, null); } - private static void sendChunk(final PersistentTopic persistentTopic, + protected static void sendChunk(final PersistentTopic persistentTopic, final String producerName, final long sequenceId, final Integer chunkId, @@ -233,16 +234,33 @@ private static void sendChunk(final PersistentTopic persistentTopic, metadata.setTotalChunkMsgSize(numChunks); } final ByteBuf buf = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, metadata, - PulsarByteBufAllocator.DEFAULT.buffer(1)); - persistentTopic.publishMessage(buf, (e, ledgerId, entryId) -> { - String name = producerName + "-" + sequenceId; - if (chunkId != null) { - name += "-" + chunkId + "-" + numChunks; + Unpooled.wrappedBuffer("a".getBytes())); + persistentTopic.publishMessage(buf, new Topic.PublishContext() { + @Override + public boolean isChunked() { + return chunkId != null; } - if (e == null) { - log.info("Sent {} to ({}, {})", name, ledgerId, entryId); - } else { - log.error("Failed to send {}: {}", name, e.getMessage()); + + @Override + public String getProducerName() { + return producerName; + } + + public long getSequenceId() { + return sequenceId; + } + + @Override + public void completed(Exception e, long ledgerId, long entryId) { + String name = producerName + "-" + sequenceId; + if (chunkId != null) { + name += "-" + chunkId + "-" + numChunks; + } + if (e == null) { + log.info("Sent {} to ({}, {})", name, ledgerId, entryId); + } else { + log.error("Failed to send {}: {}", name, e.getMessage()); + } } }); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java index 2797d66f7fe21..f266afd8a2ee1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java @@ -356,6 +356,80 @@ public void testMaxPendingChunkMessages() throws Exception { assertNull(consumer.receive(5, TimeUnit.SECONDS)); } + @Test + public void 
testResendChunkMessagesWithoutAckHole() throws Exception { + log.info("-- Starting {} test --", methodName); + final String topicName = "persistent://my-property/my-ns/testResendChunkMessagesWithoutAckHole"; + final String subName = "my-subscriber-name"; + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .maxPendingChunkedMessage(10) + .autoAckOldestChunkedMessageOnQueueFull(true) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .chunkMaxMessageSize(100) + .enableChunking(true) + .enableBatching(false) + .create(); + + sendSingleChunk(producer, "0", 0, 2); + + sendSingleChunk(producer, "0", 0, 2); // Resending the first chunk + sendSingleChunk(producer, "0", 1, 2); + + Message receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-0-0|chunk-0-1|"); + consumer.acknowledge(receivedMsg); + assertEquals(admin.topics().getStats(topicName).getSubscriptions().get(subName) + .getNonContiguousDeletedMessagesRanges(), 0); + } + + @Test + public void testResendChunkMessages() throws Exception { + log.info("-- Starting {} test --", methodName); + final String topicName = "persistent://my-property/my-ns/testResendChunkMessages"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName("my-subscriber-name") + .maxPendingChunkedMessage(10) + .autoAckOldestChunkedMessageOnQueueFull(true) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .chunkMaxMessageSize(100) + .enableChunking(true) + .enableBatching(false) + .create(); + + sendSingleChunk(producer, "0", 0, 2); + + sendSingleChunk(producer, "0", 0, 2); // Resending the first chunk + sendSingleChunk(producer, "1", 0, 3); // This is for testing the interwoven chunked message + sendSingleChunk(producer, "1", 1, 3); + sendSingleChunk(producer, "1", 0, 3); // Resending the UUID-1 chunked message + + sendSingleChunk(producer, "0", 1, 2); + + Message receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-0-0|chunk-0-1|"); + consumer.acknowledge(receivedMsg); + + sendSingleChunk(producer, "1", 1, 3); + sendSingleChunk(producer, "1", 2, 3); + + receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-1-0|chunk-1-1|chunk-1-2|"); + consumer.acknowledge(receivedMsg); + Assert.assertEquals(((ConsumerImpl) consumer).getAvailablePermits(), 8); + } + /** * Validate that chunking is not supported with batching and non-persistent topic * diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java index a41aac9bd457f..d9bbc6a9d742a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java @@ -211,7 +211,7 @@ public void testReadMessageWithBatchingWithMessageInclusive() throws Exception { reader.close(); } - @Test(timeOut = 10000) + @Test public void testReaderWithTimeLong() throws Exception { String ns = "my-property/my-ns"; String topic = "persistent://" + ns + "/testReadFromPartition" + UUID.randomUUID(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java index b4d01e263bc7a..7812844bdc200 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java @@ -319,7 +319,7 @@ public void testNegativeAcksDeleteFromUnackedTracker() throws Exception { negativeAcksTracker.close(); } - @Test(timeOut = 10000) + @Test public void testNegativeAcksWithBatchAckEnabled() throws Exception { cleanup(); conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java index 76936334eb0ba..b9139dabdf021 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java @@ -85,6 +85,7 @@ public void setup() throws Exception { // set isTcpLookup = true, to use BinaryProtoLookupService to get topics for a pattern. isTcpLookup = true; + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java index 09a9a003f3ec5..2ae8ef1d03902 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertSame; import static org.testng.Assert.assertTrue; @@ -26,6 +27,7 @@ import com.google.common.collect.Lists; +import java.time.Duration; import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; @@ -33,23 +35,31 @@ import java.util.regex.Pattern; import java.util.stream.IntStream; +import io.netty.util.Timeout; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.InjectedClientCnxClientBuilder; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.RegexSubscriptionMode; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.api.proto.BaseCommand; +import org.apache.pulsar.common.api.proto.CommandWatchTopicListSuccess; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; 
import org.testng.annotations.Test; @Test(groups = "broker-impl") @@ -65,6 +75,7 @@ public void setup() throws Exception { isTcpLookup = true; // enabled transaction, to test pattern consumers not subscribe to transaction system topic. conf.setTransactionCoordinatorEnabled(true); + conf.setSubscriptionPatternMaxLength(10000); super.internalSetup(); super.producerBaseSetup(); } @@ -208,6 +219,12 @@ public void testBinaryProtoToGetTopicsOfNamespacePersistent() throws Exception { .subscribe(); assertTrue(consumer.getTopic().startsWith(PatternMultiTopicsConsumerImpl.DUMMY_TOPIC_NAME_PREFIX)); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -285,6 +302,12 @@ public void testBinaryProtoSubscribeAllTopicOfNamespace() throws Exception { .subscribe(); assertTrue(consumer.getTopic().startsWith(PatternMultiTopicsConsumerImpl.DUMMY_TOPIC_NAME_PREFIX)); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -362,6 +385,12 @@ public void testBinaryProtoToGetTopicsOfNamespaceNonPersistent() throws Exceptio .subscriptionTopicsMode(RegexSubscriptionMode.NonPersistentOnly) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -453,6 +482,12 @@ public void testBinaryProtoToGetTopicsOfNamespaceAll() throws Exception { .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -523,6 +558,11 @@ public void testStartEmptyPatternConsumer() throws Exception { .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. 
+ Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); // 3. verify consumer get methods, to get 5 number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); @@ -587,13 +627,28 @@ public void testStartEmptyPatternConsumer() throws Exception { producer3.close(); } - @Test(timeOut = testTimeout) - public void testAutoSubscribePatterConsumerFromBrokerWatcher() throws Exception { - String key = "AutoSubscribePatternConsumer"; - String subscriptionName = "my-ex-subscription-" + key; + @DataProvider(name= "delayTypesOfWatchingTopics") + public Object[][] delayTypesOfWatchingTopics(){ + return new Object[][]{ + {true}, + {false} + }; + } - Pattern pattern = Pattern.compile("persistent://my-property/my-ns/pattern-topic.*"); - Consumer consumer = pulsarClient.newConsumer() + @Test(timeOut = testTimeout, dataProvider = "delayTypesOfWatchingTopics") + public void testAutoSubscribePatterConsumerFromBrokerWatcher(boolean delayWatchingTopics) throws Exception { + final String key = "AutoSubscribePatternConsumer"; + final String subscriptionName = "my-ex-subscription-" + key; + final Pattern pattern = Pattern.compile("persistent://my-property/my-ns/pattern-topic.*"); + + PulsarClient client = null; + if (delayWatchingTopics) { + client = createDelayWatchTopicsClient(); + } else { + client = pulsarClient; + } + + Consumer consumer = client.newConsumer() .topicsPattern(pattern) // Disable automatic discovery. .patternAutoDiscoveryPeriod(1000) @@ -618,7 +673,162 @@ public void testAutoSubscribePatterConsumerFromBrokerWatcher() throws Exception assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); }); + // cleanup. + consumer.close(); + admin.topics().deletePartitionedTopic(topicName); + if (delayWatchingTopics) { + client.close(); + } + } + + @DataProvider(name= "regexpConsumerArgs") + public Object[][] regexpConsumerArgs(){ + return new Object[][]{ + {true, true}, + {true, false}, + {false, true}, + {false, false} + }; + } + + private void waitForTopicListWatcherStarted(Consumer consumer) { + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + log.info("isDone: {}, isCompletedExceptionally: {}", completableFuture.isDone(), + completableFuture.isCompletedExceptionally()); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + } + + @Test(timeOut = testTimeout, dataProvider = "regexpConsumerArgs") + public void testPreciseRegexpSubscribe(boolean partitioned, boolean createTopicAfterWatcherStarted) throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String subscriptionName = "s1"; + final Pattern pattern = Pattern.compile(String.format("%s$", topicName)); + + Consumer consumer = pulsarClient.newConsumer() + .topicsPattern(pattern) + // Disable automatic discovery. + .patternAutoDiscoveryPeriod(1000) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) + .receiverQueueSize(4) + .subscribe(); + if (createTopicAfterWatcherStarted) { + waitForTopicListWatcherStarted(consumer); + } + + // 1. create topic. 
+ if (partitioned) { + admin.topics().createPartitionedTopic(topicName, 1); + } else { + admin.topics().createNonPartitionedTopic(topicName); + } + + // 2. verify consumer can subscribe the topic. + assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); + Awaitility.await().untilAsserted(() -> { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitions().size(), 1); + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getConsumers().size(), 1); + if (partitioned) { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); + } else { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 0); + } + }); + + // cleanup. consumer.close(); + if (partitioned) { + admin.topics().deletePartitionedTopic(topicName); + } else { + admin.topics().delete(topicName); + } + } + + @DataProvider(name= "partitioned") + public Object[][] partitioned(){ + return new Object[][]{ + {true}, + {true} + }; + } + + @Test(timeOut = 240 * 1000, dataProvider = "partitioned") + public void testPreciseRegexpSubscribeDisabledTopicWatcher(boolean partitioned) throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String subscriptionName = "s1"; + final Pattern pattern = Pattern.compile(String.format("%s$", topicName)); + + // Close all ServerCnx by closing the client-side sockets so the config change takes effect. + pulsar.getConfig().setEnableBrokerSideSubscriptionPatternEvaluation(false); + reconnectAllConnections(); + + Consumer consumer = pulsarClient.newConsumer() + .topicsPattern(pattern) + // Disabling brokerSideSubscriptionPatternEvaluation also disables the topic list watcher, + // so set patternAutoDiscoveryPeriod to a small value. + .patternAutoDiscoveryPeriod(1) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) + .receiverQueueSize(4) + .subscribe(); + + // 1. create topic. + if (partitioned) { + admin.topics().createPartitionedTopic(topicName, 1); + } else { + admin.topics().createNonPartitionedTopic(topicName); + } + + // 2. verify consumer can subscribe the topic. + // Since the minimum value of `patternAutoDiscoveryPeriod` is 60s, we set the test timeout to three times that value. + assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); + Awaitility.await().atMost(Duration.ofMinutes(3)).untilAsserted(() -> { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitions().size(), 1); + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getConsumers().size(), 1); + if (partitioned) { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); + } else { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 0); + } + }); + + // cleanup. + consumer.close(); + if (partitioned) { + admin.topics().deletePartitionedTopic(topicName); + } else { + admin.topics().delete(topicName); + } + // Close all ServerCnx by closing the client-side sockets so the config change takes effect.
+ pulsar.getConfig().setEnableBrokerSideSubscriptionPatternEvaluation(true); + reconnectAllConnections(); + } + + private PulsarClient createDelayWatchTopicsClient() throws Exception { + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + return InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public CompletableFuture newWatchTopicList( + BaseCommand command, long requestId) { + // Inject 2 seconds delay when sending command New Watch Topics. + CompletableFuture res = new CompletableFuture<>(); + new Thread(() -> { + sleepSeconds(2); + super.newWatchTopicList(command, requestId).whenComplete((v, ex) -> { + if (ex != null) { + res.completeExceptionally(ex); + } else { + res.complete(v); + } + }); + }).start(); + return res; + } + }); } // simulate subscribe a pattern which has 3 topics, but then matched topic added in. @@ -663,6 +873,12 @@ public void testAutoSubscribePatternConsumer() throws Exception { .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); // 4. verify consumer get methods, to get 6 number of partitions and topics: 6=1+2+3 @@ -773,6 +989,12 @@ public void testAutoUnsubscribePatternConsumer() throws Exception { .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); // 4. verify consumer get methods, to get 0 number of partitions and topics: 6=1+2+3 @@ -809,7 +1031,9 @@ public void testAutoUnsubscribePatternConsumer() throws Exception { // 7. call recheckTopics to unsubscribe topic 1,3, verify topics number: 2=6-1-3 log.debug("recheck topics change"); PatternMultiTopicsConsumerImpl consumer1 = ((PatternMultiTopicsConsumerImpl) consumer); - consumer1.run(consumer1.getRecheckPatternTimeout()); + Timeout recheckPatternTimeout = spy(consumer1.getRecheckPatternTimeout()); + doReturn(false).when(recheckPatternTimeout).isCancelled(); + consumer1.run(recheckPatternTimeout); Thread.sleep(100); assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitions().size(), 2); assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getConsumers().size(), 2); @@ -857,6 +1081,12 @@ public void testTopicDeletion() throws Exception { .subscriptionName("sub") .subscribe(); + // Wait topic list watcher creation. 
+ Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); PatternMultiTopicsConsumerImpl consumerImpl = (PatternMultiTopicsConsumerImpl) consumer; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java new file mode 100644 index 0000000000000..240d8d2304768 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.impl; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.service.ServerCnx; +import org.apache.pulsar.client.api.BatcherBuilder; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.api.proto.CommandCloseProducer; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +/** + * Different with {@link org.apache.pulsar.client.api.SimpleProducerConsumerTest}, this class can visit the variables + * of {@link ConsumerImpl} or {@link ProducerImpl} which have protected or default access modifiers. + */ +@Slf4j +@Test(groups = "broker-impl") +public class ProducerConsumerInternalTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testSameProducerRegisterTwice() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + admin.topics().createNonPartitionedTopic(topicName); + + // Create producer using default producerName. 
+        ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer().topic(topicName).create();
+        ServiceProducer serviceProducer = getServiceProducer(producer, topicName);
+
+        // Remove the producer maintained by the server cnx, so that it can register a second time.
+        removeServiceProducerMaintainedByServerCnx(serviceProducer);
+
+        // Trigger the client producer reconnect.
+        CommandCloseProducer commandCloseProducer = new CommandCloseProducer();
+        commandCloseProducer.setProducerId(producer.producerId);
+        producer.getClientCnx().handleCloseProducer(commandCloseProducer);
+
+        // Verify that the reconnection succeeds.
+        Awaitility.await().untilAsserted(() -> {
+            assertEquals(producer.getState().toString(), "Ready");
+        });
+    }
+
+    @Test
+    public void testSameProducerRegisterTwiceWithSpecifiedProducerName() throws Exception {
+        final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_");
+        final String pName = "p1";
+        admin.topics().createNonPartitionedTopic(topicName);
+
+        // Create producer using the specified producerName.
+        ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer().producerName(pName).topic(topicName).create();
+        ServiceProducer serviceProducer = getServiceProducer(producer, topicName);
+
+        // Remove the producer maintained by the server cnx, so that it can register a second time.
+        removeServiceProducerMaintainedByServerCnx(serviceProducer);
+
+        // Trigger the client producer reconnect.
+        CommandCloseProducer commandCloseProducer = new CommandCloseProducer();
+        commandCloseProducer.setProducerId(producer.producerId);
+        producer.getClientCnx().handleCloseProducer(commandCloseProducer);
+
+        // Verify that the reconnection succeeds.
+        Awaitility.await().untilAsserted(() -> {
+            assertEquals(producer.getState().toString(), "Ready", "The producer registration failed");
+        });
+    }
+
+    private void removeServiceProducerMaintainedByServerCnx(ServiceProducer serviceProducer) {
+        ServerCnx serverCnx = (ServerCnx) serviceProducer.getServiceProducer().getCnx();
+        serverCnx.removedProducer(serviceProducer.getServiceProducer());
+        Awaitility.await().untilAsserted(() -> {
+            assertFalse(serverCnx.getProducers().containsKey(serviceProducer.getServiceProducer().getProducerId()));
+        });
+    }
+
+    @Test
+    public void testExclusiveConsumerWillAlwaysRetryEvenIfReceivedConsumerBusyError() throws Exception {
+        final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_");
+        final String subscriptionName = "subscription1";
+        admin.topics().createNonPartitionedTopic(topicName);
+
+        final ConsumerImpl consumer = (ConsumerImpl) pulsarClient.newConsumer().topic(topicName.toString())
+                .subscriptionType(SubscriptionType.Exclusive).subscriptionName(subscriptionName).subscribe();
+
+        ClientCnx clientCnx = consumer.getClientCnx();
+        ServerCnx serverCnx = (ServerCnx) pulsar.getBrokerService()
+                .getTopic(topicName,false).join().get().getSubscription(subscriptionName)
+                .getDispatcher().getConsumers().get(0).cnx();
+
+        // Disconnect to trigger the broker to remove the consumer related to this connection.
+        // Make the second subscribe run after the broker has removed the old consumer, so that it receives
+        // the error: "Exclusive consumer is already connected".
+        final CountDownLatch countDownLatch = new CountDownLatch(1);
+        serverCnx.execute(() -> {
+            try {
+                countDownLatch.await();
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        });
+        clientCnx.close();
+        Thread.sleep(1000);
+        countDownLatch.countDown();
+
+        // Verify the consumer will always retry subscribing, even after it received the ConsumerBusy error.
+        Awaitility.await().untilAsserted(() -> {
+            assertEquals(consumer.getState(), HandlerState.State.Ready);
+        });
+
+        // cleanup.
+        consumer.close();
+        admin.topics().delete(topicName, false);
+    }
+
+    @DataProvider(name = "containerBuilder")
+    public Object[][] containerBuilderProvider() {
+        return new Object[][] {
+                { BatcherBuilder.DEFAULT },
+                { BatcherBuilder.KEY_BASED }
+        };
+    }
+
+    @Test(timeOut = 30000, dataProvider = "containerBuilder")
+    public void testSendTimerCheckForBatchContainer(BatcherBuilder batcherBuilder) throws Exception {
+        final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_");
+        @Cleanup Producer producer = pulsarClient.newProducer().topic(topicName)
+                .batcherBuilder(batcherBuilder)
+                .sendTimeout(1, TimeUnit.SECONDS)
+                .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS)
+                .batchingMaxMessages(1000)
+                .create();
+
+        log.info("Before sendAsync msg-0: {}", System.nanoTime());
+        CompletableFuture future = producer.sendAsync("msg-0".getBytes());
+        future.thenAccept(msgId -> log.info("msg-0 done: {} (msgId: {})", System.nanoTime(), msgId));
+        future.get(); // t: the current time point
+
+        ((ProducerImpl) producer).triggerSendTimer(); // t+1000ms && t+2000ms: run() will be called again
+
+        Thread.sleep(1950); // t+2050ms: the batch timer has expired, which happens after run() is called
+        log.info("Before sendAsync msg-1: {}", System.nanoTime());
+        future = producer.sendAsync("msg-1".getBytes());
+        future.thenAccept(msgId -> log.info("msg-1 done: {} (msgId: {})", System.nanoTime(), msgId));
+        future.get();
+    }
+}
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java
index 3ec784e248cba..d776fdb0ed915 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java
@@ -23,7 +23,12 @@ import static org.mockito.Mockito.mock;
 import io.netty.buffer.ByteBufAllocator;
 import java.lang.reflect.Field;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import lombok.Cleanup;
+import org.apache.pulsar.client.api.CompressionType;
+import org.apache.pulsar.client.api.MessageRoutingMode;
 import org.apache.pulsar.client.api.ProducerConsumerBase;
 import org.apache.pulsar.client.api.PulsarClient;
 import org.apache.pulsar.client.api.PulsarClientException;
@@ -35,9 +40,6 @@
 import org.testng.annotations.BeforeMethod;
 import org.testng.annotations.Test;

-import java.nio.charset.StandardCharsets;
-import java.util.concurrent.TimeUnit;
-
 @Test(groups = "broker-impl")
 public class ProducerMemoryLimitTest extends ProducerConsumerBase {

@@ -191,6 +193,40 @@ public void testProducerCloseMemoryRelease() throws Exception {
         Assert.assertEquals(memoryLimitController.currentUsage(), 0);
     }

+    @Test(timeOut = 10_000)
+
public void testProducerBlockReserveMemory() throws Exception { + replacePulsarClient(PulsarClient.builder(). + serviceUrl(lookupUrl.toString()) + .memoryLimit(1, SizeUnit.KILO_BYTES)); + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic("testProducerMemoryLimit") + .sendTimeout(5, TimeUnit.SECONDS) + .compressionType(CompressionType.SNAPPY) + .messageRoutingMode(MessageRoutingMode.RoundRobinPartition) + .maxPendingMessages(0) + .blockIfQueueFull(true) + .enableBatching(true) + .batchingMaxMessages(100) + .batchingMaxBytes(65536) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .create(); + int msgCount = 5; + CountDownLatch cdl = new CountDownLatch(msgCount); + for (int i = 0; i < msgCount; i++) { + producer.sendAsync("memory-test".getBytes(StandardCharsets.UTF_8)).whenComplete(((messageId, throwable) -> { + cdl.countDown(); + })); + } + + cdl.await(); + + producer.close(); + PulsarClientImpl clientImpl = (PulsarClientImpl) this.pulsarClient; + final MemoryLimitController memoryLimitController = clientImpl.getMemoryLimitController(); + Assert.assertEquals(memoryLimitController.currentUsage(), 0); + } + private void initClientWithMemoryLimit() throws PulsarClientException { replacePulsarClient(PulsarClient.builder(). serviceUrl(lookupUrl.toString()) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java index 0ba8511c0ca74..4f7ac6e8de942 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java @@ -122,7 +122,7 @@ public CompletableFuture getConnection(String topic) { result.completeExceptionally(new IOException("New connections are rejected.")); return result; } else { - return super.getConnection(topic); + return super.getConnection(topic, getCnxPool().genRandomKeyToSelectCon()); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java index 9fa834a166cda..d79a31c07f218 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java @@ -19,8 +19,10 @@ package org.apache.pulsar.client.impl; -import static org.apache.pulsar.common.api.proto.CompressionType.LZ4; import static org.apache.pulsar.common.api.proto.CompressionType.NONE; +import static org.apache.pulsar.common.api.proto.CompressionType.ZSTD; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertTrue; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.io.IOException; @@ -55,7 +57,7 @@ public class RawBatchMessageContainerImplTest { public void setEncryptionAndCompression(boolean encrypt, boolean compress) { if (compress) { - compressionType = LZ4; + compressionType = ZSTD; } else { compressionType = NONE; } @@ -100,14 +102,24 @@ public MessageImpl createMessage(String topic, String value, int entryId) { @BeforeMethod public void setup() throws Exception { - setEncryptionAndCompression(false, false); + setEncryptionAndCompression(false, true); } - @Test - public void testToByteBuf() throws IOException { - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(2); + 
@Test(timeOut = 20000)
+    public void testToByteBufWithBatchLimit() throws IOException {
+        RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl();
+
         String topic = "my-topic";
-        container.add(createMessage(topic, "hi-1", 0), null);
-        container.add(createMessage(topic, "hi-2", 1), null);
+        MessageImpl message1 = createMessage(topic, "hi-1", 0);
+        boolean hasEnoughSpace1 = container.haveEnoughSpace(message1);
+        var full1 = container.add(message1, null);
+        assertFalse(full1);
+        assertTrue(hasEnoughSpace1);
+        MessageImpl message2 = createMessage(topic, "hi-2", 1);
+        boolean hasEnoughSpace2 = container.haveEnoughSpace(message2);
+        assertFalse(hasEnoughSpace2);
+        var full2 = container.add(message2, null);
+        assertFalse(full2);
+
         ByteBuf buf = container.toByteBuf();
@@ -126,18 +138,23 @@ public void testToByteBuf() throws IOException {
         MessageMetadata metadata = singleMessageMetadataAndPayload.getMessageBuilder();
         Assert.assertEquals(metadata.getNumMessagesInBatch(), 2);
         Assert.assertEquals(metadata.getHighestSequenceId(), 1);
-        Assert.assertEquals(metadata.getCompression(), NONE);
+        Assert.assertEquals(metadata.getCompression(), ZSTD);
+
+        CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType);
+        ByteBuf payload = codec.decode(metadataAndPayload, metadata.getUncompressedSize());
         SingleMessageMetadata messageMetadata = new SingleMessageMetadata();
+        messageMetadata.setCompactedOut(true);
         ByteBuf payload1 = Commands.deSerializeSingleMessageInBatch(
-                singleMessageMetadataAndPayload.getPayload(), messageMetadata, 0, 2);
+                payload, messageMetadata, 0, 2);
         ByteBuf payload2 = Commands.deSerializeSingleMessageInBatch(
-                singleMessageMetadataAndPayload.getPayload(), messageMetadata, 1, 2);
+                payload, messageMetadata, 1, 2);
         Assert.assertEquals(payload1.toString(Charset.defaultCharset()), "hi-1");
         Assert.assertEquals(payload2.toString(Charset.defaultCharset()), "hi-2");
         payload1.release();
         payload2.release();
+        payload.release();
         singleMessageMetadataAndPayload.release();
         metadataAndPayload.release();
         buf.release();
@@ -147,7 +164,7 @@ public void testToByteBuf() throws IOException {
     public void testToByteBufWithCompressionAndEncryption() throws IOException {
         setEncryptionAndCompression(true, true);

-        RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(2);
+        RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl();
         container.setCryptoKeyReader(cryptoKeyReader);
         String topic = "my-topic";
         container.add(createMessage(topic, "hi-1", 0), null);
@@ -169,7 +186,7 @@ public void testToByteBufWithCompressionAndEncryption() throws IOException {
         MessageMetadata metadata = singleMessageMetadataAndPayload.getMessageBuilder();
         Assert.assertEquals(metadata.getNumMessagesInBatch(), 2);
         Assert.assertEquals(metadata.getHighestSequenceId(), 1);
-        Assert.assertEquals(metadata.getCompression(), compressionType);
+        Assert.assertEquals(metadata.getCompression(), ZSTD);

         ByteBuf payload = singleMessageMetadataAndPayload.getPayload();
         int maxDecryptedSize = msgCrypto.getMaxOutputSize(payload.readableBytes());
@@ -197,7 +214,7 @@

     @Test
     public void testToByteBufWithSingleMessage() throws IOException {
-        RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(2);
+        RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl();
         String topic = "my-topic";
         container.add(createMessage(topic, "hi-1", 0), null);
         ByteBuf buf = 
container.toByteBuf(); @@ -218,34 +235,43 @@ public void testToByteBufWithSingleMessage() throws IOException { MessageMetadata metadata = singleMessageMetadataAndPayload.getMessageBuilder(); Assert.assertEquals(metadata.getNumMessagesInBatch(), 1); Assert.assertEquals(metadata.getHighestSequenceId(), 0); - Assert.assertEquals(metadata.getCompression(), NONE); + Assert.assertEquals(metadata.getCompression(), ZSTD); + + CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType); + ByteBuf payload = codec.decode(metadataAndPayload, metadata.getUncompressedSize()); - Assert.assertEquals(singleMessageMetadataAndPayload.getPayload().toString(Charset.defaultCharset()), "hi-1"); + Assert.assertEquals(payload.toString(Charset.defaultCharset()), "hi-1"); singleMessageMetadataAndPayload.release(); metadataAndPayload.release(); buf.release(); } @Test - public void testMaxNumMessagesInBatch() { - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1); + public void testAddDifferentBatchMessage() { + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; boolean isFull = container.add(createMessage(topic, "hi", 0), null); - Assert.assertTrue(isFull); - Assert.assertTrue(container.isBatchFull()); + Assert.assertFalse(isFull); + Assert.assertFalse(container.isBatchFull()); + MessageImpl message = createMessage(topic, "hi-1", 0); + Assert.assertTrue(container.haveEnoughSpace(message)); + isFull = container.add(message, null); + Assert.assertFalse(isFull); + message = createMessage(topic, "hi-2", 1); + Assert.assertFalse(container.haveEnoughSpace(message)); } @Test(expectedExceptions = UnsupportedOperationException.class) public void testCreateOpSendMsg() { - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); container.createOpSendMsg(); } @Test public void testToByteBufWithEncryptionWithoutCryptoKeyReader() { setEncryptionAndCompression(true, false); - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; container.add(createMessage(topic, "hi-1", 0), null); Assert.assertEquals(container.getNumMessagesInBatch(), 1); @@ -263,7 +289,7 @@ public void testToByteBufWithEncryptionWithoutCryptoKeyReader() { @Test public void testToByteBufWithEncryptionWithInvalidEncryptKeys() { setEncryptionAndCompression(true, false); - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); container.setCryptoKeyReader(cryptoKeyReader); encryptKeys = new HashMap<>(); encryptKeys.put(null, null); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java index fb09cd9951885..de011ea490c09 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java @@ -30,8 +30,10 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.commons.lang3.tuple.ImmutableTriple; +import org.apache.pulsar.broker.BrokerTestUtil; import 
org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.MessageId; @@ -50,6 +52,7 @@ import org.testng.annotations.Test; @Test(groups = "broker-impl") +@Slf4j public class RawReaderTest extends MockedPulsarServiceBaseTest { private static final String subscription = "foobar-sub"; @@ -57,6 +60,12 @@ public class RawReaderTest extends MockedPulsarServiceBaseTest { @BeforeMethod @Override public void setup() throws Exception { + conf.setBrokerEntryMetadataInterceptors(org.assertj.core.util.Sets.newTreeSet( + "org.apache.pulsar.common.intercept.AppendBrokerTimestampMetadataInterceptor", + "org.apache.pulsar.common.intercept.AppendIndexMetadataInterceptor" + )); + conf.setSystemTopicEnabled(false); + conf.setExposingBrokerEntryMetadataToClientEnabled(true); super.internalSetup(); admin.clusters().createCluster("test", @@ -110,7 +119,7 @@ public static String extractKey(RawMessage m) { @Test public void testHasMessageAvailableWithoutBatch() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); while (true) { @@ -127,20 +136,18 @@ public void testHasMessageAvailableWithoutBatch() throws Exception { } } Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testHasMessageAvailableWithBatch() throws Exception { int numKeys = 20; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys, true); RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); int messageCount = 0; while (true) { boolean hasMsg = reader.hasMessageAvailableAsync().get(); - if (hasMsg && (messageCount == numKeys)) { - Assert.fail("HasMessageAvailable shows still has message when there is no message"); - } if (hasMsg) { try (RawMessage m = reader.readNextAsync().get()) { MessageMetadata meta = Commands.parseMessageMetadata(m.getHeadersAndPayload()); @@ -157,13 +164,14 @@ public void testHasMessageAvailableWithBatch() throws Exception { } Assert.assertEquals(messageCount, numKeys); Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testRawReader() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); @@ -179,12 +187,13 @@ public void testRawReader() throws Exception { } } Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testSeekToStart() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys); @@ -213,12 +222,13 @@ public void testSeekToStart() throws Exception { } } Assert.assertTrue(readKeys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testSeekToMiddle() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = 
"persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys); @@ -256,6 +266,7 @@ public void testSeekToMiddle() throws Exception { } } Assert.assertTrue(readKeys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } /** @@ -264,7 +275,7 @@ public void testSeekToMiddle() throws Exception { @Test public void testFlowControl() throws Exception { int numMessages = RawReaderImpl.DEFAULT_RECEIVER_QUEUE_SIZE * 5; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numMessages); @@ -290,12 +301,13 @@ public void testFlowControl() throws Exception { } Assert.assertEquals(timeouts, 1); Assert.assertEquals(keys.size(), numMessages); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testFlowControlBatch() throws Exception { int numMessages = RawReaderImpl.DEFAULT_RECEIVER_QUEUE_SIZE * 5; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numMessages, true); @@ -318,11 +330,12 @@ public void testFlowControlBatch() throws Exception { } } Assert.assertEquals(keys.size(), numMessages); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testBatchingExtractKeysAndIds() throws Exception { - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); try (Producer producer = pulsarClient.newProducer().topic(topic) .maxPendingMessages(3) @@ -357,7 +370,7 @@ public void testBatchingExtractKeysAndIds() throws Exception { @Test public void testBatchingRebatch() throws Exception { - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); try (Producer producer = pulsarClient.newProducer().topic(topic) .maxPendingMessages(3) @@ -388,7 +401,7 @@ public void testBatchingRebatch() throws Exception { public void testAcknowledgeWithProperties() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); @@ -419,14 +432,14 @@ public void testAcknowledgeWithProperties() throws Exception { Assert.assertEquals( ledger.openCursor(subscription).getProperties().get("foobar"), Long.valueOf(0xdeadbeefdecaL))); - + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testReadCancellationOnClose() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys/2); RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java index 73fe97996424c..c343ab0d6e294 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java @@ -34,6 +34,7 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; 
import org.apache.pulsar.client.api.MessageIdAdv; +import org.apache.pulsar.client.api.MessageListener; import org.apache.pulsar.client.api.MessageRouter; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; @@ -57,22 +58,27 @@ import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; - import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; import java.util.Set; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.TreeSet; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -1327,6 +1333,7 @@ public void testPartitionsUpdatesForMultipleTopics() throws Exception { assertEquals(admin.topics().getPartitionedTopicMetadata(topicName1).partitions, 3); consumer.getRecheckPatternTimeout().task().run(consumer.getRecheckPatternTimeout()); + Assert.assertTrue(consumer.getRecheckPatternTimeout().isCancelled()); Awaitility.await().untilAsserted(() -> { Assert.assertEquals(consumer.getPartitionsOfTheTopicMap(), 8); @@ -1393,4 +1400,76 @@ public Map getActiveConsumers() { } } + @DataProvider + public static Object[][] seekByFunction() { + return new Object[][] { + { true }, { false } + }; + } + + @Test(timeOut = 30000, dataProvider = "seekByFunction") + public void testSeekToNewerPosition(boolean seekByFunction) throws Exception { + final var topic1 = TopicName.get(newTopicName()).toString() + .replace("my-property", "public").replace("my-ns", "default"); + final var topic2 = TopicName.get(newTopicName()).toString() + .replace("my-property", "public").replace("my-ns", "default"); + @Cleanup final var producer1 = pulsarClient.newProducer(Schema.STRING).topic(topic1).create(); + @Cleanup final var producer2 = pulsarClient.newProducer(Schema.STRING).topic(topic2).create(); + producer1.send("1-0"); + producer2.send("2-0"); + producer1.send("1-1"); + producer2.send("2-1"); + final var consumer1 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + final var timestamps = new ArrayList(); + for (int i = 0; i < 4; i++) { + timestamps.add(consumer1.receive().getPublishTime()); + } + timestamps.sort(Comparator.naturalOrder()); + final var timestamp = timestamps.get(2); + consumer1.close(); + + final Function, CompletableFuture> seekAsync = consumer -> { + final var future = seekByFunction ? 
consumer.seekAsync(__ -> timestamp) : consumer.seekAsync(timestamp); + assertEquals(((ConsumerBase) consumer).getIncomingMessageSize(), 0L); + assertEquals(((ConsumerBase) consumer).getTotalIncomingMessages(), 0); + assertTrue(((ConsumerBase) consumer).getUnAckedMessageTracker().isEmpty()); + return future; + }; + + @Cleanup final var consumer2 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-2") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer2).get(); + final var values = new TreeSet(); + for (int i = 0; i < 2; i++) { + values.add(consumer2.receive().getValue()); + } + assertEquals(values, new TreeSet<>(Arrays.asList("1-1", "2-1"))); + + final var valuesInListener = new CopyOnWriteArrayList(); + @Cleanup final var consumer3 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-3") + .messageListener((MessageListener) (__, msg) -> valuesInListener.add(msg.getValue())) + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer3).get(); + if (valuesInListener.isEmpty()) { + Awaitility.await().untilAsserted(() -> assertEquals(valuesInListener.size(), 2)); + assertEquals(valuesInListener.stream().sorted().toList(), Arrays.asList("1-1", "2-1")); + } // else: consumer3 has passed messages to the listener before seek, in this case we cannot assume anything + + @Cleanup final var consumer4 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-4") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer4).get(); + final var valuesInReceiveAsync = new ArrayList(); + valuesInReceiveAsync.add(consumer4.receiveAsync().get().getValue()); + valuesInReceiveAsync.add(consumer4.receiveAsync().get().getValue()); + assertEquals(valuesInReceiveAsync.stream().sorted().toList(), Arrays.asList("1-1", "2-1")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java index 83feaa3ac1158..348fb04b7dd23 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java @@ -254,6 +254,65 @@ private void testFilterMsgsInPendingAckStateWhenConsumerDisconnect(boolean enabl Assert.assertEquals(receiveCounter, count / 2); } + @Test + private void testMsgsInPendingAckStateWouldNotGetTheConsumerStuck() throws Exception { + final String topicName = NAMESPACE1 + "/testMsgsInPendingAckStateWouldNotGetTheConsumerStuck"; + final String subscription = "test"; + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topicName) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.INT32) + .topic(topicName) + .subscriptionName(subscription) + .subscriptionType(SubscriptionType.Shared) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + int numStep1Receive = 2, numStep2Receive = 2, numStep3Receive = 2; + int numTotalMessage = numStep1Receive + numStep2Receive + numStep3Receive; + + for (int i = 0; i < numTotalMessage; i++) { + producer.send(i); + } + + Transaction 
step1Txn = getTxn(); + Transaction step2Txn = getTxn(); + + // Step 1, try to consume some messages but do not commit the transaction + for (int i = 0; i < numStep1Receive; i++) { + consumer.acknowledgeAsync(consumer.receive().getMessageId(), step1Txn).get(); + } + + // Step 2, try to consume some messages and commit the transaction + for (int i = 0; i < numStep2Receive; i++) { + consumer.acknowledgeAsync(consumer.receive().getMessageId(), step2Txn).get(); + } + + // commit step2Txn + step2Txn.commit().get(); + + // close and re-create consumer + consumer.close(); + @Cleanup + Consumer consumer2 = pulsarClient.newConsumer(Schema.INT32) + .topic(topicName) + .receiverQueueSize(numStep3Receive) + .subscriptionName(subscription) + .subscriptionType(SubscriptionType.Shared) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + // Step 3, try to consume the rest messages and should receive all of them + for (int i = 0; i < numStep3Receive; i++) { + // should get the message instead of timeout + Message msg = consumer2.receive(3, TimeUnit.SECONDS); + Assert.assertEquals(msg.getValue(), numStep1Receive + numStep2Receive + i); + } + } + @Test(dataProvider="enableBatch") private void produceCommitTest(boolean enableBatch) throws Exception { @Cleanup @@ -280,9 +339,9 @@ private void produceCommitTest(boolean enableBatch) throws Exception { int messageCnt = 1000; for (int i = 0; i < messageCnt; i++) { if (i % 5 == 0) { - producer.newMessage(txn1).value(("Hello Txn - " + i).getBytes(UTF_8)).send(); + producer.newMessage(txn1).value(("Hello Txn - " + i).getBytes(UTF_8)).sendAsync(); } else { - producer.newMessage(txn2).value(("Hello Txn - " + i).getBytes(UTF_8)).send(); + producer.newMessage(txn2).value(("Hello Txn - " + i).getBytes(UTF_8)).sendAsync(); } txnMessageCnt++; } @@ -811,7 +870,7 @@ private void txnCumulativeAckTest(boolean batchEnable, int maxBatchSize, Subscri public Transaction getTxn() throws Exception { return pulsarClient .newTransaction() - .withTransactionTimeout(10, TimeUnit.SECONDS) + .withTransactionTimeout(10, TimeUnit.MINUTES) .build() .get(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java index 957671b7f8d9c..8524eb27aacce 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java @@ -164,6 +164,7 @@ public void cleanup() throws Exception { @Test public void testEntryLookup() throws Exception { + @Cleanup BookKeeper bk = pulsar.getBookKeeperClientFactory().create( this.conf, null, null, Optional.empty(), null); @@ -219,6 +220,7 @@ public void testEntryLookup() throws Exception { @Test public void testCleanupOldCompactedTopicLedger() throws Exception { + @Cleanup BookKeeper bk = pulsar.getBookKeeperClientFactory().create( this.conf, null, null, Optional.empty(), null); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java index 055c595fbfec8..88d923f74e196 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java @@ -38,6 +38,7 @@ import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BookKeeper; 
+import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; @@ -45,9 +46,13 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.SystemTopicNames; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -212,6 +217,51 @@ public void testCompactionRetentionOnTopicCreationWithTopicPolicies() throws Exc ); } + @Test + public void testRetentionPolicesForSystemTopic() throws Exception { + String namespace = "my-tenant/my-ns"; + String topicPrefix = "persistent://" + namespace + "/"; + admin.namespaces().setRetention(namespace, new RetentionPolicies(-1, -1)); + // Check event topics and transaction internal topics. + for (String eventTopic : SystemTopicNames.EVENTS_TOPIC_NAMES) { + checkSystemTopicRetentionPolicy(topicPrefix + eventTopic); + } + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_COORDINATOR_ASSIGN); + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_COORDINATOR_LOG); + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.PENDING_ACK_STORE_SUFFIX); + + // Check common topics. + checkCommonTopicRetentionPolicy(topicPrefix + "my-topic" + System.nanoTime()); + // Specify retention policies for system topic. + pulsar.getConfiguration().setTopicLevelPoliciesEnabled(true); + pulsar.getConfiguration().setSystemTopicEnabled(true); + admin.topics().createNonPartitionedTopic(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + admin.topicPolicies().setRetention(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT, + new RetentionPolicies(10, 10)); + Awaitility.await().untilAsserted(() -> { + checkTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT, + new RetentionPolicies(10, 10)); + }); + } + + private void checkSystemTopicRetentionPolicy(String topicName) throws Exception { + checkTopicRetentionPolicy(topicName, new RetentionPolicies(0, 0)); + + } + + private void checkCommonTopicRetentionPolicy(String topicName) throws Exception { + checkTopicRetentionPolicy(topicName, new RetentionPolicies(-1, -1)); + } + + private void checkTopicRetentionPolicy(String topicName, RetentionPolicies retentionPolicies) throws Exception { + ManagedLedgerConfig config = pulsar.getBrokerService() + .getManagedLedgerConfig(TopicName.get(topicName)).get(); + Assert.assertEquals(config.getRetentionSizeInMB(), retentionPolicies.getRetentionSizeInMB()); + Assert.assertEquals(config.getRetentionTimeMillis(), retentionPolicies.getRetentionTimeInMinutes() < 0 + ? 
retentionPolicies.getRetentionTimeInMinutes() + : retentionPolicies.getRetentionTimeInMinutes() * 60000L); + } + private void testCompactionCursorRetention(String topic) throws Exception { Set keys = Sets.newHashSet("a", "b", "c"); Set keysToExpire = Sets.newHashSet("x1", "x2"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java index d11a2f87192ff..545dd97675c2d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java @@ -25,7 +25,10 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertSame; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; + import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.buffer.ByteBuf; @@ -46,21 +49,30 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import lombok.Cleanup; import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.api.OpenBuilder; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; import org.apache.bookkeeper.mledger.Position; +import org.apache.commons.lang3.mutable.MutableLong; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.persistent.PersistentDispatcherSingleActiveConsumer; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.SystemTopic; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.CryptoKeyReader; @@ -70,12 +82,15 @@ import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.BatchMessageIdImpl; -import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.common.api.proto.CommandAck; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; @@ 
-85,6 +100,7 @@ import org.apache.pulsar.common.protocol.Markers; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -92,6 +108,7 @@ import org.testng.annotations.Test; @Test(groups = "broker-impl") +@Slf4j public class CompactionTest extends MockedPulsarServiceBaseTest { protected ScheduledExecutorService compactionScheduler; protected BookKeeper bk; @@ -534,14 +551,8 @@ public void testBatchMessageIdsDontChange() throws Exception { Assert.assertEquals(message2.getKey(), "key2"); Assert.assertEquals(new String(message2.getData()), "my-message-3"); if (getCompactor() instanceof StrategicTwoPhaseCompactor) { - MessageIdImpl id = (MessageIdImpl) messages.get(0).getMessageId(); - MessageIdImpl id1 = new MessageIdImpl( - id.getLedgerId(), id.getEntryId(), id.getPartitionIndex()); - Assert.assertEquals(message1.getMessageId(), id1); - id = (MessageIdImpl) messages.get(2).getMessageId(); - MessageIdImpl id2 = new MessageIdImpl( - id.getLedgerId(), id.getEntryId(), id.getPartitionIndex()); - Assert.assertEquals(message2.getMessageId(), id2); + Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId()); + Assert.assertEquals(message2.getMessageId(), messages.get(1).getMessageId()); } else { Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId()); Assert.assertEquals(message2.getMessageId(), messages.get(2).getMessageId()); @@ -549,6 +560,60 @@ public void testBatchMessageIdsDontChange() throws Exception { } } + @Test + public void testBatchMessageWithNullValue() throws Exception { + String topic = "persistent://my-property/use/my-ns/my-topic1"; + + pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") + .receiverQueueSize(1).readCompacted(true).subscribe().close(); + + try (Producer producer = pulsarClient.newProducer().topic(topic) + .maxPendingMessages(3) + .enableBatching(true) + .batchingMaxMessages(3) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create() + ) { + // batch 1 + producer.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync(); + producer.newMessage().key("key1").value(null).sendAsync(); + producer.newMessage().key("key2").value("my-message-3".getBytes()).send(); + + // batch 2 + producer.newMessage().key("key3").value("my-message-4".getBytes()).sendAsync(); + producer.newMessage().key("key3").value("my-message-5".getBytes()).sendAsync(); + producer.newMessage().key("key3").value("my-message-6".getBytes()).send(); + + // batch 3 + producer.newMessage().key("key4").value("my-message-7".getBytes()).sendAsync(); + producer.newMessage().key("key4").value(null).sendAsync(); + producer.newMessage().key("key5").value("my-message-9".getBytes()).send(); + } + + + // compact the topic + compact(topic); + + // Read messages before compaction to get ids + List> messages = new ArrayList<>(); + try (Consumer consumer = pulsarClient.newConsumer().topic(topic) + .subscriptionName("sub1").receiverQueueSize(1).readCompacted(true).subscribe()) { + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message); + } + } + + assertEquals(messages.size(), 3); + assertEquals(messages.get(0).getKey(), "key2"); + assertEquals(messages.get(1).getKey(), "key3"); + assertEquals(messages.get(2).getKey(), "key5"); + } + @Test public void 
testWholeBatchCompactedOut() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; @@ -585,8 +650,17 @@ public void testWholeBatchCompactedOut() throws Exception { } } - @Test - public void testKeyLessMessagesPassThrough() throws Exception { + @DataProvider(name = "retainNullKey") + public static Object[][] retainNullKey() { + return new Object[][] {{true}, {false}}; + } + + @Test(dataProvider = "retainNullKey") + public void testKeyLessMessagesPassThrough(boolean retainNullKey) throws Exception { + conf.setTopicCompactionRetainNullKey(retainNullKey); + restartBroker(); + FieldUtils.writeDeclaredField(compactor, "topicCompactionRetainNullKey", retainNullKey, true); + String topic = "persistent://my-property/use/my-ns/my-topic1"; // subscribe before sending anything, so that we get all messages @@ -627,29 +701,25 @@ public void testKeyLessMessagesPassThrough() throws Exception { Message m = consumer.receive(2, TimeUnit.SECONDS); assertNull(m); } else { - Message message1 = consumer.receive(); - Assert.assertFalse(message1.hasKey()); - Assert.assertEquals(new String(message1.getData()), "my-message-1"); - - Message message2 = consumer.receive(); - Assert.assertFalse(message2.hasKey()); - Assert.assertEquals(new String(message2.getData()), "my-message-2"); - - Message message3 = consumer.receive(); - Assert.assertEquals(message3.getKey(), "key1"); - Assert.assertEquals(new String(message3.getData()), "my-message-4"); - - Message message4 = consumer.receive(); - Assert.assertEquals(message4.getKey(), "key2"); - Assert.assertEquals(new String(message4.getData()), "my-message-6"); - - Message message5 = consumer.receive(); - Assert.assertFalse(message5.hasKey()); - Assert.assertEquals(new String(message5.getData()), "my-message-7"); + List> result = new ArrayList<>(); + while (true) { + Message message = consumer.receive(10, TimeUnit.SECONDS); + if (message == null) { + break; + } + result.add(Pair.of(message.getKey(), message.getData() == null ? 
null : new String(message.getData()))); + } - Message message6 = consumer.receive(); - Assert.assertFalse(message6.hasKey()); - Assert.assertEquals(new String(message6.getData()), "my-message-8"); + List> expectList; + if (retainNullKey) { + expectList = List.of( + Pair.of(null, "my-message-1"), Pair.of(null, "my-message-2"), + Pair.of("key1", "my-message-4"), Pair.of("key2", "my-message-6"), + Pair.of(null, "my-message-7"), Pair.of(null, "my-message-8")); + } else { + expectList = List.of(Pair.of("key1", "my-message-4"), Pair.of("key2", "my-message-6")); + } + Assert.assertEquals(result, expectList); } } } @@ -1711,9 +1781,9 @@ public void testReadUnCompacted(boolean batchEnabled) throws PulsarClientExcepti @SneakyThrows @Test public void testHealthCheckTopicNotCompacted() { - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()); + NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfiguration()); String topicV1 = "persistent://" + heartbeatNamespaceV1.toString() + "/healthcheck"; - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()); + NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfiguration()); String topicV2 = heartbeatNamespaceV2.toString() + "/healthcheck"; Producer producer1 = pulsarClient.newProducer().topic(topicV1).create(); Producer producer2 = pulsarClient.newProducer().topic(topicV2).create(); @@ -1783,4 +1853,342 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { Assert.assertNotEquals(ledgerId, -1L); }); } + + @Test(timeOut = 100000) + public void testReceiverQueueSize() throws Exception { + final String topicName = "persistent://my-property/use/my-ns/testReceiverQueueSize" + UUID.randomUUID(); + final String subName = "my-sub"; + final int receiveQueueSize = 1; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.STRING) + .topic(topicName).readCompacted(true).receiverQueueSize(receiveQueueSize).subscriptionName(subName) + .subscribe(); + + //Give some time to consume + Awaitility.await() + .untilAsserted(() -> Assert.assertEquals(consumer.getStats().getMsgNumInReceiverQueue().intValue(), + receiveQueueSize)); + consumer.close(); + producer.close(); + } + + @Test + public void testDispatcherMaxReadSizeBytes() throws Exception { + final String topicName = + "persistent://my-property/use/my-ns/testDispatcherMaxReadSizeBytes" + UUID.randomUUID(); + final String subName = "my-sub"; + final int receiveQueueSize = 1; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topicName).create(); + + for (int i = 0; i < 10; i+=2) { + producer.newMessage().key(UUID.randomUUID().toString()).value(new byte[4*1024*1024]).send(); + } 
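Each message published by the loop above carries a 4 MiB payload. Assuming the broker's default dispatcherMaxReadSizeBytes of roughly 5 MiB (this test does not appear to override it), a single dispatcher read can hold only one such entry, which is what the readEntriesComplete verification further down expects. A minimal sketch of that arithmetic, with the 5 MiB figure treated as an assumption:

// Illustrative only: the cap value below is an assumed default, not something configured by the test.
public class DispatcherReadSizeSketch {
    public static void main(String[] args) {
        final long maxReadSizeBytes = 5L * 1024 * 1024; // assumed broker default for dispatcherMaxReadSizeBytes
        final long entrySizeBytes = 4L * 1024 * 1024;   // payload size produced by the loop in the test
        // The dispatcher stops filling a read batch once the size cap would be exceeded,
        // so at most floor(5 MiB / 4 MiB) = 1 entry fits per read.
        final long entriesPerRead = Math.max(1, maxReadSizeBytes / entrySizeBytes);
        System.out.println("entries per dispatcher read = " + entriesPerRead); // prints 1
    }
}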
+        producer.flush();
+
+        admin.topics().triggerCompaction(topicName);
+
+        Awaitility.await().untilAsserted(() -> {
+            assertEquals(admin.topics().compactionStatus(topicName).status,
+                    LongRunningProcessStatus.Status.SUCCESS);
+        });
+
+        admin.topics().unload(topicName);
+
+        ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.BYTES)
+                .topic(topicName).readCompacted(true).receiverQueueSize(receiveQueueSize).subscriptionName(subName)
+                .subscribe();
+
+        PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();
+        PersistentSubscription persistentSubscription = topic.getSubscriptions().get(subName);
+        PersistentDispatcherSingleActiveConsumer dispatcher =
+                Mockito.spy((PersistentDispatcherSingleActiveConsumer) persistentSubscription.getDispatcher());
+        FieldUtils.writeDeclaredField(persistentSubscription, "dispatcher", dispatcher, true);
+
+        Awaitility.await().untilAsserted(() -> {
+            assertSame(consumer.getStats().getMsgNumInReceiverQueue(), 1);
+        });
+
+        consumer.increaseAvailablePermits(2);
+
+        Thread.sleep(2000);
+
+        Mockito.verify(dispatcher, Mockito.atLeastOnce())
+                .readEntriesComplete(Mockito.argThat(argument -> argument.size() == 1),
+                        Mockito.any(PersistentDispatcherSingleActiveConsumer.ReadEntriesCtx.class));
+
+        consumer.close();
+        producer.close();
+    }
+
+    @Test
+    public void testCompactionDuplicate() throws Exception {
+        String topic = "persistent://my-property/use/my-ns/testCompactionDuplicate";
+        final int numMessages = 1000;
+        final int maxKeys = 800;
+
+        @Cleanup
+        Producer producer = pulsarClient.newProducer()
+                .topic(topic)
+                .enableBatching(false)
+                .messageRoutingMode(MessageRoutingMode.SinglePartition)
+                .create();
+
+        // trigger compaction (create __compaction cursor)
+        admin.topics().triggerCompaction(topic);
+
+        Map expected = new HashMap<>();
+        Random r = new Random(0);
+
+        pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close();
+
+        for (int j = 0; j < numMessages; j++) {
+            int keyIndex = r.nextInt(maxKeys);
+            String key = "key" + keyIndex;
+            byte[] data = ("my-message-" + key + "-" + j).getBytes();
+            producer.newMessage().key(key).value(data).send();
+            expected.put(key, data);
+        }
+
+        producer.flush();
+
+        // trigger compaction
+        admin.topics().triggerCompaction(topic);
+
+        Awaitility.await().untilAsserted(() -> {
+            assertEquals(admin.topics().compactionStatus(topic).status,
+                    LongRunningProcessStatus.Status.RUNNING);
+        });
+
+        // Wait for phase one to complete
+        Thread.sleep(500);
+
+        // Unload the topic to make the compaction reader reconnect
+        admin.topics().unload(topic);
+
+        Awaitility.await().untilAsserted(() -> {
+            PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topic, false);
+            // The compacted topic ledger should have as many entries as there are unique keys.
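+            // (If the compaction reader's reconnect during phase two caused entries to be written twice, the
+            // compacted ledger would end up with more entries than unique keys; the assertion below guards
+            // against exactly that duplication.)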
+ Assert.assertEquals(internalStats.compactedLedger.entries, expected.size()); + Assert.assertTrue(internalStats.compactedLedger.ledgerId > -1); + Assert.assertFalse(internalStats.compactedLedger.offloaded); + }); + + // consumer with readCompacted enabled only get compacted entries + try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") + .readCompacted(true).subscribe()) { + while (true) { + Message m = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertEquals(expected.remove(m.getKey()), m.getData()); + if (expected.isEmpty()) { + break; + } + } + } + } + + @Test + public void testDeleteCompactedLedger() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testDeleteCompactedLedger"; + + final String subName = "my-sub"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + pulsarClient.newConsumer().topic(topicName).subscriptionName(subName).readCompacted(true).subscribe().close(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + compact(topicName); + + MutableLong compactedLedgerId = new MutableLong(-1); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1L); + compactedLedgerId.setValue(stats.compactedLedger.ledgerId); + Assert.assertEquals(stats.compactedLedger.entries, 2L); + }); + + // delete compacted ledger + admin.topics().deleteSubscription(topicName, "__compaction"); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertEquals(stats.compactedLedger.ledgerId, -1L); + Assert.assertEquals(stats.compactedLedger.entries, -1L); + assertThrows(BKException.BKNoSuchLedgerExistsException.class, () -> pulsarTestContext.getBookKeeperClient() + .openLedger(compactedLedgerId.getValue(), BookKeeper.DigestType.CRC32C, new byte[]{})); + }); + + compact(topicName); + + MutableLong compactedLedgerId2 = new MutableLong(-1); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1L); + compactedLedgerId2.setValue(stats.compactedLedger.ledgerId); + Assert.assertEquals(stats.compactedLedger.entries, 2L); + }); + + producer.close(); + admin.topics().delete(topicName); + + Awaitility.await().untilAsserted(() -> assertThrows(BKException.BKNoSuchLedgerExistsException.class, + () -> pulsarTestContext.getBookKeeperClient().openLedger( + compactedLedgerId2.getValue(), BookKeeper.DigestType.CRC32, new byte[]{}))); + } + + @Test + public void testDeleteCompactedLedgerWithSlowAck() throws Exception { + // Disable topic level policies, since block ack thread may also block thread of delete topic policies. 
+ conf.setTopicLevelPoliciesEnabled(false); + restartBroker(); + + String topicName = "persistent://my-property/use/my-ns/testDeleteCompactedLedgerWithSlowAck"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + pulsarClient.newConsumer().topic(topicName).subscriptionType(SubscriptionType.Exclusive) + .subscriptionName(Compactor.COMPACTION_SUBSCRIPTION) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).readCompacted(true).subscribe() + .close(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get(); + PersistentSubscription subscription = spy(topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION)); + topic.getSubscriptions().put(Compactor.COMPACTION_SUBSCRIPTION, subscription); + + AtomicLong compactedLedgerId = new AtomicLong(-1); + AtomicBoolean pauseAck = new AtomicBoolean(); + Mockito.doAnswer(invocationOnMock -> { + Map properties = (Map) invocationOnMock.getArguments()[2]; + log.info("acknowledgeMessage properties: {}", properties); + compactedLedgerId.set(properties.get(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY)); + pauseAck.set(true); + while (pauseAck.get()) { + Thread.sleep(200); + } + return invocationOnMock.callRealMethod(); + }).when(subscription).acknowledgeMessage(Mockito.any(), Mockito.eq( + CommandAck.AckType.Cumulative), Mockito.any()); + + admin.topics().triggerCompaction(topicName); + + while (!pauseAck.get()) { + Thread.sleep(100); + } + + CompletableFuture currentCompaction = + (CompletableFuture) FieldUtils.readDeclaredField(topic, "currentCompaction", true); + CompletableFuture spyCurrentCompaction = spy(currentCompaction); + FieldUtils.writeDeclaredField(topic, "currentCompaction", spyCurrentCompaction, true); + currentCompaction.whenComplete((obj, throwable) -> { + if (throwable != null) { + spyCurrentCompaction.completeExceptionally(throwable); + } else { + spyCurrentCompaction.complete(obj); + } + }); + Mockito.doAnswer(invocationOnMock -> { + pauseAck.set(false); + return invocationOnMock.callRealMethod(); + }).when(spyCurrentCompaction).handle(Mockito.any()); + + admin.topics().delete(topicName, true); + + Awaitility.await().untilAsserted(() -> assertThrows(BKException.BKNoSuchLedgerExistsException.class, + () -> pulsarTestContext.getBookKeeperClient().openLedger( + compactedLedgerId.get(), BookKeeper.DigestType.CRC32, new byte[]{}))); + } + + @Test + public void testCompactionWithTTL() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testCompactionWithTTL"; + String subName = "sub"; + pulsarClient.newConsumer(Schema.STRING).topic(topicName).subscriptionName(subName).readCompacted(true) + .subscribe().close(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K2").value("V2").send(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + producer.newMessage().key("K1").value("V3").send(); + producer.newMessage().key("K2").value("V4").send(); + + Thread.sleep(1000); + + // expire messages + 
admin.topics().expireMessagesForAllSubscriptions(topicName, 1); + + // trim the topic + admin.topics().unload(topicName); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName, false); + assertEquals(internalStats.numberOfEntries, 4); + }); + + producer.newMessage().key("K3").value("V5").send(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = + pulsarClient.newConsumer(Schema.STRING).topic(topicName).subscriptionName(subName).readCompacted(true) + .subscribe(); + + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V3", "V4", "V5")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java index e86be6a4db816..eab5f10bd295f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java @@ -19,13 +19,12 @@ package org.apache.pulsar.compaction; import static org.apache.pulsar.client.impl.RawReaderTest.extractKey; - +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import io.netty.buffer.ByteBuf; - import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; @@ -33,23 +32,38 @@ import java.util.Map; import java.util.Optional; import java.util.Random; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerEntry; import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.Entry; +import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.RawMessage; +import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.RawMessageImpl; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.protocol.Commands; +import 
org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -65,7 +79,6 @@ public class CompactorTest extends MockedPulsarServiceBaseTest { protected Compactor compactor; - @BeforeMethod @Override public void setup() throws Exception { @@ -101,16 +114,17 @@ protected Compactor getCompactor() { return compactor; } - private List compactAndVerify(String topic, Map expected, boolean checkMetrics) throws Exception { + private List compactAndVerify(String topic, Map expected, boolean checkMetrics) + throws Exception { long compactedLedgerId = compact(topic); LedgerHandle ledger = bk.openLedger(compactedLedgerId, - Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, - Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); + Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, + Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); Assert.assertEquals(ledger.getLastAddConfirmed() + 1, // 0..lac - expected.size(), - "Should have as many entries as there is keys"); + expected.size(), + "Should have as many entries as there is keys"); List keys = new ArrayList<>(); Enumeration entries = ledger.readEntries(0, ledger.getLastAddConfirmed()); @@ -124,7 +138,7 @@ private List compactAndVerify(String topic, Map expected byte[] bytes = new byte[payload.readableBytes()]; payload.readBytes(bytes); Assert.assertEquals(bytes, expected.remove(key), - "Compacted version should match expected version"); + "Compacted version should match expected version"); m.close(); } if (checkMetrics) { @@ -148,17 +162,18 @@ public void testCompaction() throws Exception { final int numMessages = 1000; final int maxKeys = 10; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); Map expected = new HashMap<>(); Random r = new Random(0); for (int j = 0; j < numMessages; j++) { int keyIndex = r.nextInt(maxKeys); - String key = "key"+keyIndex; + String key = "key" + keyIndex; byte[] data = ("my-message-" + key + "-" + j).getBytes(); producer.newMessage() .key(key) @@ -169,14 +184,64 @@ public void testCompaction() throws Exception { compactAndVerify(topic, expected, true); } + @Test + public void testAllCompactedOut() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testAllCompactedOut"; + // set retain null key to true + boolean oldRetainNullKey = pulsar.getConfig().isTopicCompactionRetainNullKey(); + pulsar.getConfig().setTopicCompactionRetainNullKey(true); + this.restartBroker(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(true).topic(topicName).batchingMaxMessages(3).create(); + + producer.newMessage().key("K1").value("V1").sendAsync(); + producer.newMessage().key("K2").value("V2").sendAsync(); + producer.newMessage().key("K2").value(null).sendAsync(); + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + producer.newMessage().key("K1").value(null).sendAsync(); + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Reader reader = 
pulsarClient.newReader(Schema.STRING) + .subscriptionName("reader-test") + .topic(topicName) + .readCompacted(true) + .startMessageId(MessageId.earliest) + .create(); + while (reader.hasMessageAvailable()) { + Message message = reader.readNext(3, TimeUnit.SECONDS); + Assert.assertNotNull(message); + } + // set retain null key back to avoid affecting other tests + pulsar.getConfig().setTopicCompactionRetainNullKey(oldRetainNullKey); + } + @Test public void testCompactAddCompact() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); Map expected = new HashMap<>(); @@ -210,10 +275,11 @@ public void testCompactAddCompact() throws Exception { public void testCompactedInOrder() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); producer.newMessage() .key("c") @@ -251,11 +317,58 @@ public void testCompactEmptyTopic() throws Exception { public void testPhaseOneLoopTimeConfiguration() { ServiceConfiguration configuration = new ServiceConfiguration(); configuration.setBrokerServiceCompactionPhaseOneLoopTimeInSeconds(60); - TwoPhaseCompactor compactor = new TwoPhaseCompactor(configuration, Mockito.mock(PulsarClientImpl.class), + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); + TwoPhaseCompactor compactor = new TwoPhaseCompactor(configuration, mockClient, Mockito.mock(BookKeeper.class), compactionScheduler); Assert.assertEquals(compactor.getPhaseOneLoopReadTimeoutInSeconds(), 60); } + @Test + public void testCompactedWithConcurrentSend() throws Exception { + String topic = "persistent://my-property/use/my-ns/testCompactedWithConcurrentSend"; + + @Cleanup + Producer producer = pulsarClient.newProducer().topic(topic) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + BookKeeper bk = pulsar.getBookKeeperClientFactory().create( + this.conf, null, null, Optional.empty(), null); + Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + + CompletableFuture future = CompletableFuture.runAsync(() -> { + for (int i = 0; i < 100; i++) { + try { + producer.newMessage().key(String.valueOf(i)).value(String.valueOf(i).getBytes()).send(); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + } + }); + + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + CompactedTopic compactedTopic = persistentTopic.getCompactedTopic(); + + Awaitility.await().untilAsserted(() -> { + long compactedLedgerId = compactor.compact(topic).join(); + Thread.sleep(300); + Optional compactedTopicContext = persistentTopic.getCompactedTopicContext(); + Assert.assertTrue(compactedTopicContext.isPresent()); + Assert.assertEquals(compactedTopicContext.get().ledger.getId(), compactedLedgerId); + }); + + Position lastCompactedPosition = 
compactedTopic.getCompactionHorizon().get(); + Entry lastCompactedEntry = compactedTopic.readLastEntryOfCompactedLedger().get(); + + Assert.assertTrue(PositionImpl.get(lastCompactedPosition.getLedgerId(), lastCompactedPosition.getEntryId()) + .compareTo(lastCompactedEntry.getLedgerId(), lastCompactedEntry.getEntryId()) >= 0); + + future.join(); + } + public ByteBuf extractPayload(RawMessage m) throws Exception { ByteBuf payloadAndMetadata = m.getHeadersAndPayload(); Commands.skipChecksumIfPresent(payloadAndMetadata); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java index 0be9fa407542f..317b1a227e585 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java @@ -30,6 +30,7 @@ import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; @@ -263,6 +264,48 @@ public void testGetLastMessageIdAfterCompaction(boolean enabledBatch) throws Exc admin.topics().delete(topicName, false); } + @Test(dataProvider = "enabledBatch") + public void testGetLastMessageIdAfterCompactionWithCompression(boolean enabledBatch) throws Exception { + String topicName = "persistent://public/default/" + BrokerTestUtil.newUniqueName("tp"); + String subName = "sub"; + Consumer consumer = createConsumer(topicName, subName); + var producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .batchingMaxPublishDelay(3, TimeUnit.HOURS) + .batchingMaxBytes(Integer.MAX_VALUE) + .compressionType(CompressionType.ZSTD) + .enableBatching(enabledBatch).create(); + + List> sendFutures = new ArrayList<>(); + sendFutures.add(producer.newMessage().key("k0").value("v0").sendAsync()); + sendFutures.add(producer.newMessage().key("k0").value("v1").sendAsync()); + sendFutures.add(producer.newMessage().key("k0").value("v2").sendAsync()); + producer.flush(); + sendFutures.add(producer.newMessage().key("k1").value("v0").sendAsync()); + sendFutures.add(producer.newMessage().key("k1").value("v1").sendAsync()); + sendFutures.add(producer.newMessage().key("k1").value("v2").sendAsync()); + producer.flush(); + FutureUtil.waitForAll(sendFutures).join(); + + triggerCompactionAndWait(topicName); + + MessageIdImpl lastMessageIdByTopic = getLastMessageIdByTopic(topicName); + MessageIdImpl messageId = (MessageIdImpl) consumer.getLastMessageId(); + assertEquals(messageId.getLedgerId(), lastMessageIdByTopic.getLedgerId()); + assertEquals(messageId.getEntryId(), lastMessageIdByTopic.getEntryId()); + if (enabledBatch){ + BatchMessageIdImpl lastBatchMessageIdByTopic = (BatchMessageIdImpl) lastMessageIdByTopic; + BatchMessageIdImpl batchMessageId = (BatchMessageIdImpl) messageId; + assertEquals(batchMessageId.getBatchSize(), lastBatchMessageIdByTopic.getBatchSize()); + assertEquals(batchMessageId.getBatchIndex(), lastBatchMessageIdByTopic.getBatchIndex()); + } + + // cleanup. 
+ consumer.close(); + producer.close(); + admin.topics().delete(topicName, false); + } + @Test(dataProvider = "enabledBatch") public void testGetLastMessageIdAfterCompactionEndWithNullMsg(boolean enabledBatch) throws Exception { String topicName = "persistent://public/default/" + BrokerTestUtil.newUniqueName("tp"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java index 02812898dc49a..09c3ebe419394 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java @@ -25,7 +25,11 @@ import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState.Releasing; import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState.Splitting; import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState.isValidTransition; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.MSG_COMPRESSION_TYPE; import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData.state; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; @@ -48,6 +52,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.apache.bookkeeper.client.BookKeeper; import org.apache.commons.lang.reflect.FieldUtils; @@ -68,6 +73,7 @@ import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.impl.ReaderImpl; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.RetentionPolicies; @@ -162,7 +168,7 @@ public void setup() throws Exception { @Override public void cleanup() throws Exception { super.internalCleanup(); - + bk.close(); if (compactionScheduler != null) { compactionScheduler.shutdownNow(); } @@ -185,6 +191,7 @@ TestData generateTestData() throws PulsarAdminException, PulsarClientException { Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) .enableBatching(true) .messageRoutingMode(MessageRoutingMode.SinglePartition) .create(); @@ -352,12 +359,13 @@ public void testCompactionWithTableview() throws Exception { compactor.compact(topic, strategy).get(); // consumer with readCompacted enabled only get compacted entries - var tableview = pulsar.getClient().newTableViewBuilder(schema) + var tableview = pulsar.getClient().newTableView(schema) .topic(topic) .loadConf(Map.of( "topicCompactionStrategyClassName", ServiceUnitStateCompactionStrategy.class.getName())) .create(); + for(var etr : tableview.entrySet()){ Assert.assertEquals(expected.remove(etr.getKey()), etr.getValue()); if (expected.isEmpty()) { @@ -376,6 +384,7 @@ public void testReadCompactedBeforeCompaction() throws Exception { Producer producer = pulsarClient.newProducer(schema) 
.topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) + .create(); @@ -420,6 +429,7 @@ public void testReadEntriesAfterCompaction() throws Exception { Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) + .create(); @@ -460,6 +470,7 @@ public void testSeekEarliestAfterCompaction() throws Exception { Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) + .create(); @@ -557,6 +568,7 @@ public void testSlowTableviewAfterCompaction() throws Exception { Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); @@ -621,12 +633,87 @@ public void testSlowTableviewAfterCompaction() throws Exception { } + @Test + public void testSlowReceiveTableviewAfterCompaction() throws Exception { + String topic = "persistent://my-property/use/my-ns/my-topic1"; + String strategyClassName = "topicCompactionStrategyClassName"; + + pulsarClient.newConsumer(schema) + .topic(topic) + .subscriptionName("sub1") + .readCompacted(true) + .subscribe().close(); + + var tv = pulsar.getClient().newTableViewBuilder(schema) + .topic(topic) + .subscriptionName("slowTV") + .loadConf(Map.of( + strategyClassName, + ServiceUnitStateCompactionStrategy.class.getName())) + .create(); + + // Configure retention to ensure data is retained for the reader + admin.namespaces().setRetention("my-property/use/my-ns", + new RetentionPolicies(-1, -1)); + + Producer producer = pulsarClient.newProducer(schema) + .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + StrategicTwoPhaseCompactor compactor + = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + + var reader = ((CompletableFuture>) FieldUtils + .readDeclaredField(tv, "reader", true)).get(); + var consumer = spy(reader.getConsumer()); + FieldUtils.writeDeclaredField(reader, "consumer", consumer, true); + String bundle = "bundle1"; + final AtomicInteger versionId = new AtomicInteger(0); + final AtomicInteger cnt = new AtomicInteger(1); + int msgAddCount = 1000; // has to be big enough to cover compacted cursor fast-forward.
+ doAnswer(invocationOnMock -> { + if (cnt.decrementAndGet() == 0) { + var msg = consumer.receiveAsync(); + for (int i = 0; i < msgAddCount; i++) { + producer.newMessage().key(bundle).value( + new ServiceUnitStateData(Owned, "broker" + versionId.incrementAndGet(), true, + versionId.get())).send(); + } + compactor.compact(topic, strategy).join(); + return msg; + } + // Call the real method + reset(consumer); + return consumer.receiveAsync(); + }).when(consumer).receiveAsync(); + producer.newMessage().key(bundle).value( + new ServiceUnitStateData(Owned, "broker", true, + versionId.incrementAndGet())).send(); + producer.newMessage().key(bundle).value( + new ServiceUnitStateData(Owned, "broker" + versionId.incrementAndGet(), true, + versionId.get())).send(); + Awaitility.await().atMost(10, TimeUnit.SECONDS).untilAsserted( + () -> { + var val = tv.get(bundle); + assertNotNull(val); + assertEquals(val.dstBroker(), "broker" + versionId.get()); + } + ); + + producer.close(); + tv.close(); + } + @Test public void testBrokerRestartAfterCompaction() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) .enableBatching(true) .create(); String key = "key0"; @@ -672,6 +759,7 @@ public void testCompactEmptyTopic() throws Exception { Producer producer = pulsarClient.newProducer(schema) .topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) .enableBatching(true) .create(); @@ -733,7 +821,9 @@ public void testWholeBatchCompactedOut() throws Exception { public void testCompactionWithLastDeletedKey() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; - Producer producer = pulsarClient.newProducer(schema).topic(topic).enableBatching(true) + Producer producer = pulsarClient.newProducer(schema).topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) .messageRoutingMode(MessageRoutingMode.SinglePartition).create(); pulsarClient.newConsumer(schema).topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close(); @@ -761,7 +851,9 @@ public void testCompactionWithLastDeletedKey() throws Exception { public void testEmptyCompactionLedger() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; - Producer producer = pulsarClient.newProducer(schema).topic(topic).enableBatching(true) + Producer producer = pulsarClient.newProducer(schema).topic(topic) + .compressionType(MSG_COMPRESSION_TYPE) + .enableBatching(true) .messageRoutingMode(MessageRoutingMode.SinglePartition).create(); pulsarClient.newConsumer(schema).topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close(); @@ -791,7 +883,8 @@ public void testAllEmptyCompactionLedger() throws Exception { final int messages = 10; // 1.create producer and publish message to the topic. - ProducerBuilder builder = pulsarClient.newProducer(schema).topic(topic); + ProducerBuilder builder = pulsarClient.newProducer(schema) + .compressionType(MSG_COMPRESSION_TYPE).topic(topic); builder.batchingMaxMessages(messages / 5); Producer producer = builder.create(); @@ -828,6 +921,7 @@ public void testCompactMultipleTimesWithoutEmptyMessage() // 1.create producer and publish message to the topic. 
ProducerBuilder builder = pulsarClient.newProducer(schema).topic(topic); + builder.compressionType(MSG_COMPRESSION_TYPE); builder.enableBatching(true); @@ -876,6 +970,7 @@ public void testReadUnCompacted() // 1.create producer and publish message to the topic. ProducerBuilder builder = pulsarClient.newProducer(schema).topic(topic); + builder.compressionType(MSG_COMPRESSION_TYPE); builder.batchingMaxMessages(messages / 5); Producer producer = builder.create(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java index 1cac04c2fa956..e556ec8e0b200 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java @@ -34,7 +34,7 @@ public class StrategicCompactionRetentionTest extends CompactionRetentionTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java index 135a839bd54a8..9e327ef90fae4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java @@ -18,22 +18,32 @@ */ package org.apache.pulsar.compaction; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.MSG_COMPRESSION_TYPE; +import static org.testng.Assert.assertEquals; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.TableView; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.topics.TopicCompactionStrategy; +import org.apache.pulsar.common.util.FutureUtil; import org.testng.Assert; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -47,7 +57,7 @@ public class StrategicCompactionTest extends CompactionTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new 
TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } @@ -148,5 +158,79 @@ public void testNumericOrderCompaction() throws Exception { Assert.assertEquals(tableView.entrySet(), expectedCopy.entrySet()); } + @Test(timeOut = 20000) + public void testSameBatchCompactToSameBatch() throws Exception { + final String topic = + "persistent://my-property/use/my-ns/testSameBatchCompactToSameBatch" + UUID.randomUUID(); + // Use an odd number to make sure the last message is flushed by `reader.hasNext() == false`. + final int messages = 11; + + // 1.create producer and publish message to the topic. + ProducerBuilder builder = pulsarClient.newProducer(Schema.INT32) + .compressionType(MSG_COMPRESSION_TYPE).topic(topic); + builder.batchingMaxMessages(2) + .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS); + + Producer producer = builder.create(); + + List> futures = new ArrayList<>(messages); + for (int i = 0; i < messages; i++) { + futures.add(producer.newMessage().key(String.valueOf(i)) + .value(i) + .sendAsync()); + } + FutureUtil.waitForAll(futures).get(); + + // 2.compact the topic. + StrategicTwoPhaseCompactor compactor + = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + compactor.compact(topic, strategy).get(); + + // consumer with readCompacted enabled only get compacted entries + try (Consumer consumer = pulsarClient + .newConsumer(Schema.INT32) + .topic(topic) + .subscriptionName("sub1") + .readCompacted(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) { + int received = 0; + while (true) { + Message m = consumer.receive(2, TimeUnit.SECONDS); + if (m == null) { + break; + } + MessageIdAdv messageId = (MessageIdAdv) m.getMessageId(); + if (received < messages - 1) { + assertEquals(messageId.getBatchSize(), 2); + } else { + assertEquals(messageId.getBatchSize(), 0); + } + received++; + } + assertEquals(received, messages); + } + + } + + @Override + public void testCompactCompressedBatching() throws Exception { + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + super.testCompactCompressedBatching(); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + } + + @Override + public void testCompactEncryptedAndCompressedBatching() throws Exception { + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + super.testCompactEncryptedAndCompressedBatching(); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + } + + @Override + public void testCompactEncryptedBatching() throws Exception { + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + super.testCompactEncryptedBatching(); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java index 91dd8a2bd358b..bc65791b323cd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java @@ -33,7 +33,7 @@ public class StrategicCompactorTest extends CompactorTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new
StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } @@ -46,4 +46,4 @@ protected long compact(String topic) throws ExecutionException, InterruptedExcep protected Compactor getCompactor() { return compactor; } -} \ No newline at end of file +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionStrategyTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionStrategyTest.java index 0ecd09606cea7..50d4cdc7b0a71 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionStrategyTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionStrategyTest.java @@ -41,13 +41,13 @@ public boolean shouldKeepLeft(byte[] prev, byte[] cur) { @Test(expectedExceptions = IllegalArgumentException.class) public void testLoadInvalidTopicCompactionStrategy() { - TopicCompactionStrategy.load("uknown"); + TopicCompactionStrategy.load("uknown", "uknown"); } @Test public void testNumericOrderCompactionStrategy() { TopicCompactionStrategy strategy = - TopicCompactionStrategy.load(NumericOrderCompactionStrategy.class.getCanonicalName()); + TopicCompactionStrategy.load("numeric", NumericOrderCompactionStrategy.class.getCanonicalName()); Assert.assertFalse(strategy.shouldKeepLeft(1, 2)); Assert.assertTrue(strategy.shouldKeepLeft(2, 1)); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java index 714c9d7269970..cbf2f28b0b50b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java @@ -34,6 +34,7 @@ import java.net.URL; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; @@ -267,8 +268,12 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthorizationEnabled(config.isAuthorizationEnabled()); workerConfig.setAuthorizationProvider(config.getAuthorizationProvider()); + List urlPatterns = + List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*", "http://127\\.0\\.0\\.1:.*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java index c832cba163d63..aa190cd2e0a73 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java @@ -89,6 +89,7 @@ import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.io.core.Sink; import org.apache.pulsar.io.core.SinkContext; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -121,11 +122,16 @@ public class 
PulsarFunctionLocalRunTest { private static final String CLUSTER = "local"; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final String TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + private final String TLS_TRUST_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); private static final String SYSTEM_PROPERTY_NAME_NAR_FILE_PATH = "pulsar-io-data-generator.nar.path"; private PulsarFunctionTestTemporaryDirectory tempDirectory; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java index 95923586fe23e..72e6a3766aeee 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java @@ -71,6 +71,7 @@ import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactoryConfig; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -99,11 +100,16 @@ public class PulsarFunctionPublishTest { String primaryHost; String workerId; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final String TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + private final String TLS_TRUST_CERT_FILE_PATH = + 
ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); private PulsarFunctionTestTemporaryDirectory tempDirectory; @DataProvider(name = "validRoleName") @@ -262,8 +268,11 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthenticationEnabled(true); workerConfig.setAuthorizationEnabled(true); + List urlPatterns = List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java index 00db6a65b16ea..3508cf0bfc7e6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java @@ -20,6 +20,8 @@ import static org.apache.pulsar.common.util.PortManager.nextLockedFreePort; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Sets; @@ -30,9 +32,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; @@ -41,6 +45,7 @@ import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.impl.auth.AuthenticationTls; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.ObjectMapperFactory; @@ -50,7 +55,9 @@ import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactoryConfig; import org.apache.pulsar.functions.sink.PulsarSink; import org.apache.pulsar.functions.worker.service.WorkerServiceLoader; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -61,10 +68,16 @@ public class PulsarFunctionTlsTest { protected static final int BROKER_COUNT = 2; - private static final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private static final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private static final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private static final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final String 
TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + private final String CA_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); LocalBookkeeperEnsemble bkEnsemble; protected PulsarAdmin[] pulsarAdmins = new PulsarAdmin[BROKER_COUNT]; @@ -111,9 +124,9 @@ void setup() throws Exception { config.setAuthenticationProviders(providers); config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - config.setTlsAllowInsecureConnection(true); + config.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); config.setBrokerClientTlsEnabled(true); - config.setBrokerClientTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH); + config.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); config.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); config.setBrokerClientAuthenticationParameters( "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + ",tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); @@ -141,7 +154,12 @@ void setup() throws Exception { workerConfig.setUseTls(true); workerConfig.setTlsEnableHostnameVerification(true); workerConfig.setTlsAllowInsecureConnection(false); - workerConfig.setBrokerClientTrustCertsFilePath(TLS_SERVER_CERT_FILE_PATH); + File packagePath = new File( + PulsarSink.class.getProtectionDomain().getCodeSource().getLocation().getPath()).getParentFile(); + List urlPatterns = + List.of(packagePath.toURI() + ".*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); fnWorkerServices[i] = WorkerServiceLoader.load(workerConfig); configurations[i] = config; @@ -163,8 +181,7 @@ void setup() throws Exception { pulsarAdmins[i] = PulsarAdmin.builder() .serviceHttpUrl(pulsarServices[i].getWebServiceAddressTls()) - .tlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH) - .allowTlsInsecureConnection(true) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .authentication(authTls) .build(); } @@ -216,6 +233,13 @@ void tearDown() throws Exception { } } + @Test + public void testTLSTrustCertsConfigMapping() throws Exception { + WorkerConfig workerConfig = fnWorkerServices[0].getWorkerConfig(); + assertEquals(workerConfig.getTlsTrustCertsFilePath(), CA_CERT_FILE_PATH); + assertEquals(workerConfig.getBrokerClientTrustCertsFilePath(), CA_CERT_FILE_PATH); + } + @Test public void testFunctionsCreation() throws Exception { @@ -229,10 +253,28 @@ public void testFunctionsCreation() throws Exception { log.info(" -------- Start test function : {}", functionName); + int finalI = i; + Awaitility.await().atMost(1, TimeUnit.MINUTES).pollInterval(1, TimeUnit.SECONDS).untilAsserted(() -> { + final PulsarWorkerService workerService = ((PulsarWorkerService) fnWorkerServices[finalI]); + final LeaderService leaderService = workerService.getLeaderService(); + assertNotNull(leaderService); + if (leaderService.isLeader()) { + assertTrue(true); + } else { + final WorkerInfo workerInfo = workerService.getMembershipManager().getLeader(); + assertTrue(workerInfo != null && !workerInfo.getWorkerId().equals(workerService.getWorkerConfig().getWorkerId())); + } + }); pulsarAdmins[i].functions().createFunctionWithUrl( functionConfig, 
jarFilePathUrl ); + // Function creation is not strongly consistent, so this test can fail with a get that is too eager and + // does not have retries. + final PulsarAdmin admin = pulsarAdmins[i]; + Awaitility.await().ignoreExceptions() + .untilAsserted(() -> admin.functions().getFunction(testTenant, "my-ns", functionName)); + FunctionConfig config = pulsarAdmins[i].functions().getFunction(testTenant, "my-ns", functionName); assertEquals(config.getTenant(), testTenant); assertEquals(config.getNamespace(), "my-ns"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java index 0821974bea506..6226fa904885c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java @@ -174,7 +174,6 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setTopicCompactionFrequencySec(1); PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java index 9991e9f1b70fd..3c0dd0822b7dc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java @@ -35,6 +35,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -62,6 +63,7 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerService; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; import org.awaitility.Awaitility; import org.slf4j.Logger; @@ -75,11 +77,16 @@ public abstract class AbstractPulsarE2ETest { public static final Logger log = LoggerFactory.getLogger(AbstractPulsarE2ETest.class); - protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; + protected final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + protected final String TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + protected final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + protected final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + protected final String TLS_TRUST_CERT_FILE_PATH = + 
ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); protected final String tenant = "external-repl-prop"; protected LocalBookkeeperEnsemble bkEnsemble; @@ -300,8 +307,12 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthenticationEnabled(true); workerConfig.setAuthorizationEnabled(true); + List urlPatterns = + List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*", "http://127\\.0\\.0\\.1:.*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java index 16218f6ce6448..22b9ad0df3a69 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java @@ -51,6 +51,7 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerService; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,10 +78,16 @@ public class PulsarFunctionAdminTest { String pulsarFunctionsNamespace = tenant + "/pulsar-function-admin"; String primaryHost; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final String TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + private final String TLS_TRUST_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); private static final Logger log = LoggerFactory.getLogger(PulsarFunctionAdminTest.class); @@ -113,8 +120,7 @@ void setup(Method method) throws Exception { config.setAuthenticationProviders(providers); config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - config.setTlsAllowInsecureConnection(true); - + config.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); functionsWorkerService = createPulsarFunctionWorker(config); Optional functionWorkerService = Optional.of(functionsWorkerService); @@ -132,7 +138,6 @@ void setup(Method method) throws Exception { PulsarAdmin.builder() .serviceHttpUrl(pulsar.getWebServiceAddressTls()) .tlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH) - .allowTlsInsecureConnection(true) 
.authentication(authTls) .build()); @@ -203,11 +208,9 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setBrokerClientAuthenticationParameters( String.format("tlsCertFile:%s,tlsKeyFile:%s", TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH)); workerConfig.setUseTls(true); - workerConfig.setTlsAllowInsecureConnection(true); workerConfig.setTlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH); PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java index 5de3d4f7e0868..810ac69ac3eb3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java @@ -66,6 +66,7 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService.PulsarClientCreator; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.rest.WorkerServer; +import org.apache.pulsar.utils.ResourceUtils; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,10 +91,16 @@ public class PulsarFunctionTlsTest { PulsarAdmin functionAdmin; private final List namespaceList = new LinkedList<>(); - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.cert.pem"); + private final String TLS_SERVER_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/server-keys/broker.key-pk8.pem"); + private final String TLS_CLIENT_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.cert.pem"); + private final String TLS_CLIENT_KEY_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/client-keys/admin.key-pk8.pem"); + private final String TLS_TRUST_CERT_FILE_PATH = + ResourceUtils.getAbsolutePath("certificate-authority/certs/ca.cert.pem"); private static final Logger log = LoggerFactory.getLogger(PulsarFunctionTlsTest.class); private PulsarFunctionTestTemporaryDirectory tempDirectory; @@ -121,7 +128,7 @@ void setup(Method method) throws Exception { config.setAuthenticationProviders(providers); config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - config.setTlsAllowInsecureConnection(true); + config.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); config.setAdvertisedAddress("localhost"); PulsarAdmin admin = mock(PulsarAdmin.class); @@ -163,7 +170,7 @@ void setup(Method method) throws Exception { authTls.configure(authParams); functionAdmin = PulsarAdmin.builder().serviceHttpUrl(functionTlsUrl) - .tlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH).allowTlsInsecureConnection(true) + .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) .authentication(authTls).build(); Thread.sleep(100); @@ -217,7 +224,7 @@ private PulsarWorkerService 
createPulsarFunctionWorker(ServiceConfiguration conf String.format("tlsCertFile:%s,tlsKeyFile:%s", TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH)); workerConfig.setUseTls(true); workerConfig.setTlsAllowInsecureConnection(true); - workerConfig.setTlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH); + workerConfig.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); workerConfig.setWorkerPortTls(0); workerConfig.setTlsEnabled(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java index 0ec72b2ef470a..7eae6462545c8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java @@ -513,17 +513,13 @@ private void testUseAutoConsumeWithSchemalessTopic(SchemaType schema) throws Exc admin.topics().createPartitionedTopic(topic, 2); // set schema - if (!schema.equals(SchemaType.BYTES)) { - // don't upload bytes schema with admin API - // because null schema means the BYTES schema - SchemaInfo schemaInfo = SchemaInfoImpl - .builder() - .schema(new byte[0]) - .name("dummySchema") - .type(schema) - .build(); - admin.schemas().createSchema(topic, schemaInfo); - } + SchemaInfo schemaInfo = SchemaInfoImpl + .builder() + .schema(new byte[0]) + .name("dummySchema") + .type(schema) + .build(); + admin.schemas().createSchema(topic, schemaInfo); Producer producer = pulsarClient .newProducer() @@ -548,8 +544,7 @@ private void testUseAutoConsumeWithSchemalessTopic(SchemaType schema) throws Exc Message message2 = consumer2.receive(); if (schema == SchemaType.BYTES) { assertEquals(schema, message.getReaderSchema().get().getSchemaInfo().getType()); - // default schema of AUTO_CONSUME is BYTES - assertTrue(message2.getReaderSchema().get().toString().contains("BYTES")); + assertEquals(schema, message2.getReaderSchema().get().getSchemaInfo().getType()); } else if (schema == SchemaType.NONE) { // schema NONE is always reported as BYTES assertEquals(SchemaType.BYTES, message.getReaderSchema().get().getSchemaInfo().getType()); @@ -1295,6 +1290,33 @@ private void testIncompatibleSchema() throws Exception { assertThrows(SchemaSerializationException.class, message2::getValue); } + /** + * This test just ensures that the schema check still keeps the original logic: if there is any producer but no schema + * was registered, a new consumer cannot register a new schema. + * TODO: I think this design should be improved: if a producer uses the "AUTO_PRODUCE_BYTES" schema, we should allow + * the new consumer to register a new schema. But before we can solve this problem, we need to modify + * "CmdProducer" to let the Broker know that the Producer uses a schema of type "AUTO_PRODUCE_BYTES".
+ */ + @Test + public void testAutoProduceAndSpecifiedConsumer() throws Exception { + final String namespace = PUBLIC_TENANT + "/ns_" + randomName(16); + admin.namespaces().createNamespace(namespace, Sets.newHashSet(CLUSTER_NAME)); + final String topicName = "persistent://" + namespace + "/tp_" + randomName(16); + admin.topics().createNonPartitionedTopic(topicName); + + Producer producer = pulsarClient.newProducer(Schema.AUTO_PRODUCE_BYTES()).topic(topicName).create(); + try { + pulsarClient.newConsumer(Schema.STRING).topic(topicName).subscriptionName("sub1").subscribe(); + fail("Should throw ex: Topic does not have schema to check"); + } catch (Exception ex){ + assertTrue(ex.getMessage().contains("Topic does not have schema to check")); + } + + // Cleanup. + producer.close(); + admin.topics().delete(topicName); + } + @Test public void testCreateSchemaInParallel() throws Exception { final String namespace = "test-namespace-" + randomName(16); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java index 140dea9e7ebc7..49517a424b936 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java @@ -407,7 +407,7 @@ public void testSchemaComparison() throws Exception { assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), SchemaCompatibilityStrategy.UNDEFINED); byte[] changeSchemaBytes = (new String(Schema.AVRO(Schemas.PersonOne.class) - .getSchemaInfo().getSchema(), UTF_8) + "/n /n /n").getBytes(); + .getSchemaInfo().getSchema(), UTF_8) + "\n \n \n").getBytes(); SchemaInfo schemaInfo = SchemaInfo.builder().type(SchemaType.AVRO).schema(changeSchemaBytes).build(); admin.schemas().createSchema(fqtn, schemaInfo); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java b/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java new file mode 100644 index 0000000000000..20dd2e1066a87 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.security; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import javax.crypto.SecretKey; +import lombok.Getter; +import lombok.SneakyThrows; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.util.ObjectMapperFactory; + + +public abstract class MockedPulsarStandalone implements AutoCloseable { + + @Getter + private final ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + private PulsarTestContext pulsarTestContext; + + @Getter + private PulsarService pulsarService; + private PulsarAdmin serviceInternalAdmin; + + + { + serviceConfiguration.setClusterName(TEST_CLUSTER_NAME); + serviceConfiguration.setBrokerShutdownTimeoutMs(0L); + serviceConfiguration.setBrokerServicePort(Optional.of(0)); + serviceConfiguration.setBrokerServicePortTls(Optional.of(0)); + serviceConfiguration.setAdvertisedAddress("localhost"); + serviceConfiguration.setWebServicePort(Optional.of(0)); + serviceConfiguration.setWebServicePortTls(Optional.of(0)); + serviceConfiguration.setNumExecutorThreadPoolSize(5); + serviceConfiguration.setExposeBundlesMetricsInPrometheus(true); + } + + + protected static final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + private static final String BROKER_INTERNAL_CLIENT_SUBJECT = "broker_internal"; + private static final String BROKER_INTERNAL_CLIENT_TOKEN = Jwts.builder() + .claim("sub", BROKER_INTERNAL_CLIENT_SUBJECT).signWith(SECRET_KEY).compact(); + protected static final String SUPER_USER_SUBJECT = "super-user"; + protected static final String SUPER_USER_TOKEN = Jwts.builder() + .claim("sub", SUPER_USER_SUBJECT).signWith(SECRET_KEY).compact(); + protected static final String NOBODY_SUBJECT = "nobody"; + protected static final String NOBODY_TOKEN = Jwts.builder() + .claim("sub", NOBODY_SUBJECT).signWith(SECRET_KEY).compact(); + + + @SneakyThrows + protected void configureTokenAuthentication() { + serviceConfiguration.setAuthenticationEnabled(true); + serviceConfiguration.setAuthenticationProviders(Set.of(AuthenticationProviderToken.class.getName())); + // internal client + serviceConfiguration.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + final Map brokerClientAuthParams = new HashMap<>(); + brokerClientAuthParams.put("token", BROKER_INTERNAL_CLIENT_TOKEN); + final String brokerClientAuthParamStr = MAPPER.writeValueAsString(brokerClientAuthParams); + serviceConfiguration.setBrokerClientAuthenticationParameters(brokerClientAuthParamStr); + + Properties properties = serviceConfiguration.getProperties(); + if (properties == null) { + properties = new Properties(); + serviceConfiguration.setProperties(properties); + } + 
properties.put("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + + } + + + + protected void configureDefaultAuthorization() { + serviceConfiguration.setAuthorizationEnabled(true); + serviceConfiguration.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); + serviceConfiguration.setSuperUserRoles(Set.of(SUPER_USER_SUBJECT, BROKER_INTERNAL_CLIENT_SUBJECT)); + } + + + @SneakyThrows + protected void start() { + this.pulsarTestContext = PulsarTestContext.builder() + .spyByDefault() + .config(serviceConfiguration) + .withMockZookeeper(false) + .build(); + this.pulsarService = pulsarTestContext.getPulsarService(); + this.serviceInternalAdmin = pulsarService.getAdminClient(); + setupDefaultTenantAndNamespace(); + } + + private void setupDefaultTenantAndNamespace() throws Exception { + if (!serviceInternalAdmin.clusters().getClusters().contains(TEST_CLUSTER_NAME)) { + serviceInternalAdmin.clusters().createCluster(TEST_CLUSTER_NAME, + ClusterData.builder().serviceUrl(pulsarService.getWebServiceAddress()).build()); + } + if (!serviceInternalAdmin.tenants().getTenants().contains(DEFAULT_TENANT)) { + serviceInternalAdmin.tenants().createTenant(DEFAULT_TENANT, TenantInfo.builder().allowedClusters( + Sets.newHashSet(TEST_CLUSTER_NAME)).build()); + } + if (!serviceInternalAdmin.namespaces().getNamespaces(DEFAULT_TENANT).contains(DEFAULT_NAMESPACE)) { + serviceInternalAdmin.namespaces().createNamespace(DEFAULT_NAMESPACE); + } + } + + + @Override + public void close() throws Exception { + if (pulsarTestContext != null) { + pulsarTestContext.close(); + } + } + + // Utils + protected static final ObjectMapper mapper = new ObjectMapper(); + + // Static name + private static final String DEFAULT_TENANT = "public"; + private static final String DEFAULT_NAMESPACE = "public/default"; + private static final String TEST_CLUSTER_NAME = "test-standalone"; + + private static final ObjectMapper MAPPER = ObjectMapperFactory.getMapper().getObjectMapper(); +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java index 47379c3b6f17c..5f8f13288cfe8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java @@ -26,7 +26,6 @@ import org.testng.annotations.Test; import java.util.ArrayList; import java.util.List; -import java.util.Random; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -188,15 +187,12 @@ public void concurrentInsertions() throws Throwable { List> futures = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { final int threadIdx = i; - futures.add(executor.submit(() -> { - Random random = new Random(); + int start = N * (threadIdx + 1); for (int j = 0; j < N; j++) { - int key = random.nextInt(); + int key = start + j; // Ensure keys are unique - key -= key % (threadIdx + 1); - key = Math.abs(key); set.add(key, key); } })); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java index 3ee9b6127de7b..91cd4fab470d6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java @@ -64,12 +64,13 @@ public void setup() throws Exception { config.setWebServicePort(Optional.of(0)); config.setWebServicePortTls(Optional.of(0)); config.setBrokerClientTlsEnabled(true); - config.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - config.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - config.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); - config.setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + config.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); + config.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + config.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + config.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); config.setClusterName("use"); - config.setBrokerClientAuthenticationParameters("tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + ",tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); + config.setBrokerClientAuthenticationParameters("tlsCertFile:" + getTlsFileForClient("admin.cert") + + ",tlsKeyFile:" + getTlsFileForClient("admin.key-pk8")); config.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); config.setConfigurationMetadataStoreUrl(GLOBAL_DUMMY_VALUE); service = spyWithClassAndConstructorArgs(WebSocketService.class, config); @@ -103,7 +104,7 @@ public void socketTest() throws GeneralSecurityException { SslContextFactory sslContextFactory = new SslContextFactory(); sslContextFactory.setSslContext(SecurityUtility - .createSslContext(false, SecurityUtility.loadCertificatesFromPemFile(TLS_TRUST_CERT_FILE_PATH), null)); + .createSslContext(false, SecurityUtility.loadCertificatesFromPemFile(CA_CERT_FILE_PATH), null)); WebSocketClient consumeClient = new WebSocketClient(sslContextFactory); SimpleConsumerSocket consumeSocket = new SimpleConsumerSocket(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java index 92899feda7371..a4bc69a7266cc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java @@ -21,10 +21,6 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; - -import java.util.Collections; -import java.util.List; - import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -39,22 +35,6 @@ void setup() throws Exception { void teardown() throws Exception { } - @Test - public void testAdvertisedAddress() throws Exception { - final int numBk = 1; - - LocalBookkeeperEnsemble ensemble = new LocalBookkeeperEnsemble( - numBk, 0, 0, null, null, true, "127.0.0.2"); - ensemble.startStandalone(); - - List bookies = ensemble.getZkClient().getChildren("/ledgers/available", false); - Collections.sort(bookies); - assertEquals(bookies.size(), 2); - assertTrue(bookies.get(0).startsWith("127.0.0.2:")); - - ensemble.stop(); - } - @Test public void testStartStop() throws Exception { diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/admin.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/admin.cert.pem deleted file mode 100644 index 0665edbdc126c..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/admin.cert.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIEZTCCAk2gAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v -YmFyMCAXDTE4MDYyMjA4NTcwNloYDzIyOTIwNDA2MDg1NzA2WjAQMQ4wDAYDVQQD -DAVhZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJw3Jfbn0xkW -36kqQjES6Hn+YTZ2jXS5Co2MzsGBsIY0qJ2BbWHSSaMrja4IERUaCQp16SWxPmZ0 -srMm6ErDoap+O70CWXLT3ybYMV5aVwv3ca4uxsedzaw9MpFXfUDsJJ3yre1JpO+t -A/QzJEGq1d6NN49InUP5kB1Rpay3vaxx8hduzqTO+E/Lptv92p6GjOpXi2icSjiA -pgaan2ldGGKEKv2Sc2bfdIDkTq1yDyNmuPET0yD2dci106EW/mPyj81umPKG/o4K -5W18yG/IhXw5W1zlgO1fWCuqva8NCBdu7s1c7hUX8DBx7km4/I7dllz/nYHIfCEQ -Dmj38oQjYk8CAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp -ZmljYXRlMB0GA1UdDgQWBBQTMzAAOJ9gXvQSS7Be3+qmrb1kVDAfBgNVHSMEGDAW -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQAwy8f6hsG0 -85e3SOIztbUnaVxS7wzDeDzR3vCjpXpm4vTToYzN9zx3JHKSdJrB12emVxItwW/7 -bXqBk0n2EdQjRHCuebnY05eFMNGagMEEMVmSLOproQD7VsyALNxCss1JAyRikh70 -W7wgOVeAhqE53UqqrkzTE7Q+8Bag9t3FytHxApY17XglbWkiVcFpQwSURe9Emi3E -aCJCryGJXrBNuCFXGzetSygDEy27+2FeH8S2XsMwUEGLqDDehzvMenVz1xjXtq+s -KPkofAde52NHd4lLkSeBMSFnKe3V7Xxax2OEUsoQRF3bkbpcJSWsKS9ZAA2yrtuy -Nz/aA1F42LuSFPAYQr1kcZ8eSS918RWz+BiJYU2JuUOPd1XUmJXVvZ4CJurWaC7+ -ZD51YdD8E245xd55fsA6/qLx3eE/Kp0dVq+Hxuz6b4yLET0zkGunOe4A3hnRgkOA -XolXCL+VthhWtFGXn8CjpxDnzjahq69Io+dINehqd5aJEgvnHZIK2s7FTqqBBodU -HhyAE94f64z7ziuRhEG54bmBF+MoGyPf6dVn1Mp3+o+YeQ5q6XlKgh+u8jMgmqRO -ikdsVdMqopt/FXh9eFzvQrwOZFLK6JE/edUgb6xvS1jMF5zi2lIlIkq1RBQOr4HT -XDwX4vRtfpDxpsetGVpeq9O07fbMvp+mkw== ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/admin.key-pk8.pem b/pulsar-broker/src/test/resources/authentication/tls-http/admin.key-pk8.pem deleted file mode 100644 index 6aaa22c44d746..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/admin.key-pk8.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCcNyX259MZFt+p -KkIxEuh5/mE2do10uQqNjM7BgbCGNKidgW1h0kmjK42uCBEVGgkKdeklsT5mdLKz -JuhKw6Gqfju9Ally098m2DFeWlcL93GuLsbHnc2sPTKRV31A7CSd8q3tSaTvrQP0 -MyRBqtXejTePSJ1D+ZAdUaWst72scfIXbs6kzvhPy6bb/dqehozqV4tonEo4gKYG -mp9pXRhihCr9knNm33SA5E6tcg8jZrjxE9Mg9nXItdOhFv5j8o/Nbpjyhv6OCuVt -fMhvyIV8OVtc5YDtX1grqr2vDQgXbu7NXO4VF/Awce5JuPyO3ZZc/52ByHwhEA5o -9/KEI2JPAgMBAAECggEAP+Ipq2Q4puz8wGBgu1LhMWp+9Nfcl1xI3YQ01VulBe0o -+2h/g96McKcSBJaV7cw84ENB+kEWpK2amrsRiemhBmkjIvOAAv50Jp2I6u4E5Qbn -PXUxo1Z8UrCgKmHd/hvUCafByuUwBzf5AvebHyOu3JlhnD302mSHtAW8u/pUHd3D -sxJaw1zwQvmlD5ryM2IVYSji7NYCXF0H6V7HfyohTCrQFEWEAdqEDcFR1BUwbPCE -raq7sAiEy+cBUnfV3IOEAffOZy0vSR90/WwERcwrzCdZmpWpTqtbcqtdBPqsSQzX -shDvXd0e43+FSJzCtQsSQ8WzIrp3rgKJUDA1pJQW4QKBgQDJdTcB6qZz8r+4q9gc -q1KAJyMy01Vio1yaqYzXr0C9Z5FW1GhL+4fwer2y9JyD45sb/lP4reFj9S193BNR -C8cdxM5GrWEpzaQ0Dt1s8P1UbU8G6r4NqwI6ORF3CxXZKfXivQcgqBurJGrBdjIC -NwqAzSkX5flBbhfTlJuUH87k3wKBgQDGgjzdIWXab7FZfdUzzrtEwNooBiSEFixm -UAwP5sxL8VkM9wzAKPEVDDQBBoKIga9OESif4S9UUo4tu65AYfxF9Om26K4QrVj8 -HT/U+lfT7xFmPd/GINIbeTSmW0w7Ehpj8SbcQI4Sb2lVE562FlHh7QbHZd0/X/2J -nbgT9MRAkQKBgQCVPAN3o/+SPOzRPFtnQXJoBJYKfIrv+twKpjbzP5vRsvrzO33X -a4kUF5iXDKU0/lJUtl42BXjFt0Xvyit1CiiCYNv9d0pW0UMmXSyiGxNOi3rTQOlw -7pFD2Cqb6NZSfMbtI+I3ytBUQzHiBlCdW3CoYVJjpbSzR37W+WsWm0mEOQKBgQCq -ANObFYUjA1DBMZCrY7rhcL/kUw5myI6RuK/71k7UIwd+oP0cfHOq8N6AmlCkE1xM -4UkHU1SzRFhbNkZPARuJ1etqJ+8afTqd/3axMQyShkVCaG8CQQ1vVegPKFUqqaBM -QzRioC6L/zoYEEt16buKXvHVRpmqMszxVE9XV+HS4QKBgDvs5qloOowS5kcWInrj -yecu5MJvFf2IZpMw7EiKV8VUPeKaiUlqgFj9d9cotUIMauXgBq6f5NBRg7Ike60t 
-/JJrPtqXY+gdFLjxcKMUVYhomFlQYYg/RJZUBrtkyKBP68abopCYmb59r3ixeNNf -qA1F36mmFtzdjSdtH/dTTecN ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/broker.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/broker.cert.pem deleted file mode 100644 index b5c7a5dc709a1..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/broker.cert.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEkDCCAnigAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v -YmFyMCAXDTE4MDYyMjA4NTUzMloYDzIyOTIwNDA2MDg1NTMyWjAjMSEwHwYDVQQD -DBhicm9rZXIucHVsc2FyLmFwYWNoZS5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDQouKhZah4hMCqmg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63 -EvNjEK4L/ZWSEV45L/wc6YV14RmM6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0O -OQXVicqZLOc6igZQhRg8ANDYdTJUTF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01 -+ARO9OotMJtBY+vIU5bV6JydfgkhQH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+X -aqTN3/tF8+MBcFB3G04s1qc2CJPJM3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00s -bxa4FGbKgfDobbkJ+GgblWCkAcLN95sKTqtHAgMBAAGjgd0wgdowCQYDVR0TBAIw -ADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2Vu -ZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUaxFvJrkEGqk8azTA -DyVyTyTbJAIwQQYDVR0jBDowOIAUVwvpyyPov0c+UHo/RX6hGEOdFSehFaQTMBEx -DzANBgNVBAMMBmZvb2JhcoIJANfih0+geeIMMA4GA1UdDwEB/wQEAwIFoDATBgNV -HSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAgEA35QDGclHzQtHs3yQ -ZzNOSKisg5srTiIoQgRzfHrXfkthNFCnBzhKjBxqk3EIasVtvyGuk0ThneC1ai3y -ZK3BivnMZfm1SfyvieFoqWetsxohWfcpOSVkpvO37P6v/NmmaTIGkBN3gxKCx0QN -zqApLQyNTM++X3wxetYH/afAGUrRmBGWZuJheQpB9yZ+FB6BRp8YuYIYBzANJyW9 -spvXW03TpqX2AIoRBoGMLzK72vbhAbLWiCIfEYREhbZVRkP+yvD338cWrILlOEur -x/n8L/FTmbf7mXzHg4xaQ3zg/5+0OCPMDPUBE4xWDBAbZ82hgOcTqfVjwoPgo2V0 -fbbx6redq44J3Vn5d9Xhi59fkpqEjHpX4xebr5iMikZsNTJMeLh0h3uf7DstuO9d -mfnF5j+yDXCKb9XzCsTSvGCN+spmUh6RfSrbkw8/LrRvBUpKVEM0GfKSnaFpOaSS -efM4UEi72FRjszzHEkdvpiLhYvihINLJmDXszhc3fCi42be/DGmUhuhTZWynOPmp -0N0V/8/sGT5gh4fGEtGzS/8xEvZwO9uDlccJiG8Pi+aO0/K9urB9nppd/xKWXv3C -cib/QrW0Qow4TADWC1fnGYCpFzzaZ2esPL2MvzOYXnW4/AbEqmb6Weatluai64ZK -3N2cGJWRyvpvvmbP2hKCa4eLgEc= ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/broker.key-pk8.pem b/pulsar-broker/src/test/resources/authentication/tls-http/broker.key-pk8.pem deleted file mode 100644 index 2b51d015b8ace..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/broker.key-pk8.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDQouKhZah4hMCq -mg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63EvNjEK4L/ZWSEV45L/wc6YV14RmM -6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0OOQXVicqZLOc6igZQhRg8ANDYdTJU -TF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01+ARO9OotMJtBY+vIU5bV6Jydfgkh -QH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+XaqTN3/tF8+MBcFB3G04s1qc2CJPJ -M3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00sbxa4FGbKgfDobbkJ+GgblWCkAcLN -95sKTqtHAgMBAAECggEBALE1eMtfnk3nbAI74bih84D7C0Ug14p8jJv/qqBnsx4j -WrgbWDMVrJa7Rym2FQHBMMfgIwKnso0iSeJvaPz683j1lk833YKe0VQOPgD1m0IN -wV1J6mQ3OOZcKDIcerY1IBHqSmBEzR7dxIbnaxlCAX9gb0hdBK6zCwA5TMG5OQ5Y -3cGOmevK5i2PiejhpruA8h7E48P1ATaGHUZif9YD724oi6AcilQ8H/DlOjZTvlmK -r4aJ30f72NwGM8Ecet5CE2wyflAGtY0k+nChYkPRfy54u64Z/T9B53AvneFaj8jv -yFepZgRTs2cWhEl0KQGuBHQ4+IeOfMt2LebhvjWW8YkCgYEA7BXVsnqPHKRDd8wP -eNkolY4Fjdq4wu9ad+DaFiZcJuv7ugr+Kplltq6e4aU36zEdBYdPp/6KM/HGE/Xj -bo0CELNUKs/Ny9H/UJc8DDbVEmoF3XGiIbKKq1T8NTXTETFnwrGkBFD8nl7YTsOF -M4FZmSok0MhhkpEULAqxBS6YpQsCgYEA4jxM1egTVSWjTreg2UdYo2507jKa7maP -PRtoPsNJzWNbOpfj26l3/8pd6oYKWck6se6RxIUxUrk3ywhNJIIOvWEC7TaOH1c9 
-T4NQNcweqBW9+A1x5gyzT14gDaBfl45gs82vI+kcpVv/w2N3HZOQZX3yAUqWpfw2 -yw1uQDXtgDUCgYEAiYPWbBXTkp1j5z3nrT7g0uxc89n5USLWkYlZvxktCEbg4+dP -UUT06EoipdD1F3wOKZA9p98uZT9pX2sUxOpBz7SFTEKq3xQ9IZZWFc9CoW08aVat -V++FsnLYTa5CeXtLsy6CGTmLTDx2xrpAtlWb+QmBVFPD8fmrxFOd9STFKS0CgYAt -6ztVN3OlFqyc75yQPXD6SxMkvdTAisSMDKIOCylRrNb5f5baIP2gR3zkeyxiqPtm -3htsHfSy67EtXpP50wQW4Dft2eLi7ZweJXMEWFfomfEjBeeWYAGNHHe5DFIauuVZ -2WexDEGqNpAlIm0s7aSjVPrn1DHbouOkNyenlMqN+QKBgQDVYVhk9widShSnCmUA -G30moXDgj3eRqCf5T7NEr9GXD1QBD/rQSPh5agnDV7IYLpV7/wkYLI7l9x7mDwu+ -I9mRXkyAmTVEctLTdXQHt0jdJa5SfUaVEDUzQbr0fUjkmythTvqZ809+d3ELPeLI -5qJ7jxgksHWji4lYfL4r4J6Zaw== ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/ca.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/ca.cert.pem deleted file mode 100644 index 0446700135d39..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/ca.cert.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFCDCCAvCgAwIBAgIJANfih0+geeIMMA0GCSqGSIb3DQEBCwUAMBExDzANBgNV -BAMMBmZvb2JhcjAeFw0xODA2MjIwODQ2MjFaFw0zODA2MTcwODQ2MjFaMBExDzAN -BgNVBAMMBmZvb2JhcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOVU -UpTPeXCeyfUiQS824l9s9krZd4R6TA4D97eQ9EWm2D7ppV4gPApHO8j5f+joo/b6 -Iso4aFlHpJ8VV2a5Ol7rjQw43MJHaBgwDxB1XWgsNdfoI7ebtp/BWg2nM3r8wm+Z -gKenf9d1/1Ol+6yFUehkLkIXUvldiVegmmje8FnwhcDNE1eTrh66XqSJXEXqgBKu -NqsoYcVak72OyOO1/N8CESoSdyBkbSiH5vJyo0AUCjn7tULga7fxojmqBZDog9Pg -e5Fi/hbCrdinbxBrMgIxQ7wqXw2sw6iOWu4FU8Ih/CuF4xaQy2YP7MEk4Ff0LCY0 -KMhFMWU7550r/fz/C2l7fKhREyCQPa/bVE+dfxgZ/gCZ+p7vQ154hCCjpd+5bECv -SN1bcVIPG6ngQu4vMXa7QRBi/Od40jSVGVJXYY6kXvrYatad7035w2GGGGkvMsQm -y53yh4tqQfH7ulHqB0J5LebTQRp6nRizWigVCLjNkxJYI+Dj51qvT1zdyWEegKr1 -CthBfYzXlfjeH3xri1f0UABeC12n24Wkacd9af7zs7S3rYntEK444w/3fB0F62Lh -SESfMLAmUH0dF5plRShrFUXz23nUeS8EYgWmnGkpf/HDzB67vdfAK0tfJEtmmY78 -q06OSgMr+AOOqaomh4Ez2ZQG592bS71G8MrE7r2/AgMBAAGjYzBhMB0GA1UdDgQW -BBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAfBgNVHSMEGDAWgBRXC+nLI+i/Rz5Qej9F -fqEYQ50VJzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG -9w0BAQsFAAOCAgEAYd2PxdV+YOaWcmMG1fK7CGwSzDOGsgC7hi4gWPiNsVbz6fwQ -m5Ac7Zw76dzin8gzOPKST7B8WIoc7ZWrMnyh3G6A3u29Ec8iWahqGa91NPA3bOIl -0ldXnXfa416+JL/Q5utpiV6W2XDaB53v9GqpMk4rOTS9kCFOiuH5ZU8P69jp9mq6 -7pI/+hWFr+21ibmXH6ANxRLd/5+AqojRUYowAu2997Z+xmbpwx/2Svciq3LNY/Vz -s9DudUHCBHj/DPgNxsEUt8QNohjQkRbFTY0a1aXodJ/pm0Ehk2kf9KwYYYduR7ak -6UmPIPrZg6FePNahxwMZ0RtgX7EXmpiiIH1q9BsulddWkrFQclevsWO3ONQVrDs2 -gwY0HQuCRCJ+xgS2cyGiGohW5MkIsg1aI0i0j5GIUSppCIYgirAGCairARbCjhcx -pbMe8RTuBhCqO3R2wZ0wXu7P7/ArI/Ltm1dU6IeHUAUmeneVj5ie0SdA19mHTS2o -lG77N0jy6eq2zyEwJE6tuS/tyP1xrxdzXCYY7f6X9aNfsuPVQTcnrFajvDv8R6uD -YnRStVCdS6fZEP0JzsLrqp9bgLIRRsiqsVVBCgJdK1I/X59qk2EyCLXWSgk8T9XZ -iux8LlPpskt30YYt1KhlWB9zVz7k0uYAwits5foU6RfCRDPAyOa1q/QOXk0= ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/proxy.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/proxy.cert.pem deleted file mode 100644 index 6c2f4295c9dbd..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/proxy.cert.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEZTCCAk2gAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v -YmFyMCAXDTE4MDYyNTEzNTYwNFoYDzIyOTIwNDA5MTM1NjA0WjAQMQ4wDAYDVQQD -DAVwcm94eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMieX78Yj8OW -eBHAneRaCCoO8Qrpj8zGo7h9lCdmi1lBDh1uR2sDbotiHGfJzQn836WcYmyeAvfn -qvgr9HCXmXdLgmJ3GT/LVu5GEm6msSDiZQPr9so5lQVioisK4UwJROQsE/J52cyR 
-9o3H6M4FKb6QpoobKa62fSfTumwwulaYaDJuRRGoGIkcRuUQ59EWAaDkD3IcDpAn -9mTbnE4Iz+JxSrsZ5DJ3X/m/AqyLWtj6GAfyK9a1dhNdlf2x4JZT1QNtojiBXt95 -OIZyRBNbHMFniq5gel6wdBkmJWutfcTct7wKa2LCxLpKoDIc1HWoL3+RUzOKxYIP -0qXEQ3bmONkCAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp -ZmljYXRlMB0GA1UdDgQWBBSgsgfmDbXEkrrpHUC9GnDDjxaKizAfBgNVHSMEGDAW -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQB+uZ2OWR+G -sRqYHEeVqUI8G2p8Np8eC/onGpBL8Gj9SGxIQcIPtqHPUlCe9fd/96JOptOGRYEB -BmUCaCmQ4IgMW6e6fArka5IB4XXIgHFXyQ6ImTvjDavzlVw06zn9S4dLwVzsRBg+ -GS9svtq23W+f5rEN5N+7LhtcbclfiG4VCCqDG5VhkzEok+SRamDI8rDRZtodMw0O -/+L+xaaQPUPjX8KUlKn4uVpCDbxUzHonlCPzbkHHm5su0D4ysjJIy3/y3yow6JE/ -02L7PZkmkmw3/V+84T3X8/GD15sVUv/3v1gXEBxYwAs+RNTJ0APvMEMSvCq0AMfF -bPMZBuAGNBG7lv7TovzHgGFKXT7du5OFF/qjAsEffhbo224CB96fgwvvndwHHBFh -J06BvHZG1i9dDVhUKoB1owkWrE4RZv2ZKEtZYgizzSmzZRHtARo0t1Byc5djx1tX -TkJOHshNqJZOY1ER0DPaVQgKI+PRTbEdj/xPGRX3ebSqDmilAfPXshqgElfch6Yl -f2V58TyCnjXOibvkG9D5OyCdWLEECumOZgYar0KZgNfrvTOi1OKXnX1fsbh29fWA -ICZRcdmjkz79zQXY2SuzCWlskuXPKAmW1AMqs+l6ormmKfzIUx3Yriy4LqIIYY1v -uQD5vghZmd9HUg2KaXfSGD9stGCD8KhntA== ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/proxy.key-pk8.pem b/pulsar-broker/src/test/resources/authentication/tls-http/proxy.key-pk8.pem deleted file mode 100644 index 70e0107f2741f..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/proxy.key-pk8.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDInl+/GI/DlngR -wJ3kWggqDvEK6Y/MxqO4fZQnZotZQQ4dbkdrA26LYhxnyc0J/N+lnGJsngL356r4 -K/Rwl5l3S4Jidxk/y1buRhJuprEg4mUD6/bKOZUFYqIrCuFMCUTkLBPyednMkfaN -x+jOBSm+kKaKGymutn0n07psMLpWmGgybkURqBiJHEblEOfRFgGg5A9yHA6QJ/Zk -25xOCM/icUq7GeQyd1/5vwKsi1rY+hgH8ivWtXYTXZX9seCWU9UDbaI4gV7feTiG -ckQTWxzBZ4quYHpesHQZJiVrrX3E3Le8CmtiwsS6SqAyHNR1qC9/kVMzisWCD9Kl -xEN25jjZAgMBAAECggEAJJyCbKVW1yLGlrbIGbw0cTh41Lz6+SvnBOwl9WrJU2iD -4usVLXpa2iT1ehthx8jWJ6r6a0gK0qL8mH2tBj8kSpkFGmMRwIqjOqifBIJ3IMEw -Hh8Z0p3fjDQL1D8QDohCgkFpAn8qOCMLE6S/35khnR1Yxytd1/yFqpcBFm1uFA85 -dYisjPqWm/IZIU5rH0zgKAIhtvl9abnoi93443EHsKpRAW1gwRXx9Aak7TV768bZ -tELBsaTnXnNzamDiaimmxEOlqR9O0W8JE/31KFL26JcVmsTRG7sMpoUxCEMjOuGZ -J30bXFZUW6NrDpFsQ7uTqD6TNn2971N8KFCLnC/JYQKBgQDo82axC7L7n7BYTu18 -dupeT7n5dTBD/I3l0KtT05xiZA8GZr2i+pt+/aWzCzK4/Ee4jb4/o8CRQRB5v5mo -c9lc+BaoAIQiwiw+aufT+UojrcijrOMEL5Zk3zdZ2rcEoAsVvqtejNnwLCGI9Rnl -gp7n9oRhwDIv9Fu09snUougE3wKBgQDceAGKUB8pGd3eEya/0jU9J60LsKbcJSsN -4v1S5LiPtHOyhr0g4x/LibMP2PJhG3tJ1bgpaGmn9du2D20M6ukRhIYyn/7G+N+A -oqryyvO1MMYnhc4IEQvWrzDnBM0hV2bdjp4s/1ASVHVRwk8+orqxysIJ1D75nnRX -Tyfl6HgBRwKBgQCfVlWIhiMPv6OkU6BXgRNAHTJs8f5okmgQqNF3jgequRwZ2c6e -muIfU6myNNel9lGsZ6+Y4g4GjMWTMT4OHeewkrUUhv3atIwEyaT2tc5DZ0wUwF2r -cE1jg9bdbB/BVyMd5YRcMOWlRNpPTq8+8EB3E4RrREZPzMmplyBohGFFawKBgGjc -P0dM8nU3E1rj2wNTdPUAYQL1Y3fDyeWR+BEsLkhTeNAJ2/y/akkB1oQMGMRtMMee -ejhfrBkyC+1dCu4g8PffA4EirihvCMcDF7HhK+cbKrRzpNobWXkj3GuU0ggwrQFm -Kv+V87y0JRTdCZnuBkQ3/vBz3fwWDJnWUVC9sA5TAoGBAN4t2gJCZax5yInUgyWi -Tgumb2qVWsGBBLMTKIsrkK/KphzgAhHcVhDCybA79TmIM2FfIlpf6TE7Mv4NI055 -ZJzHX+GMT5Czy2Ku9MJD3PLTFN5MjYb6g92fKViLDMI6fwzTHB8xPJ7Ob9bV2srS -ZlmKNXTkZFk3/orK4WdCZufz ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.cert.pem deleted file mode 100644 index 9656e2c8ba0d2..0000000000000 --- 
a/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.cert.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEajCCAlKgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v -YmFyMCAXDTE4MDYyNjEzMjUxN1oYDzIyOTIwNDEwMTMyNTE3WjAVMRMwEQYDVQQD -DApzdXBlcnByb3h5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA43Sn -ys/h3wxJ/IBEzbJdQAKCxU4of5KgTDzFoOaS8C63Nwbjgy0qcWYdRDceP4lJIKSA -1+JZ+R1opyrfVIC2D9oDJFIJTFfXy9G9VYDccwAONPgAamvRzBnYcEu8fqM+Ohle -kZltKktAHnX3WtG3RyxEL5nYzlMlGwUXJu3Rxc9SlkYSxERzjWHikGqGmXLX2qB0 -k6oyxTrK+4+EHk3khbEIqyQZSOFbD7NMfnRy0CWFv/9T1shgjAIBCake6jY7lwaT -S7JvbLfG6ABf5xHMxoWLXa2qwb+Ar43Ff9g8kKZwFOxMeGcwkzyJPBbWytFxaWn+ -R2RHhTaVCGc22CjdfQIDAQABo4HFMIHCMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEB -BAQDAgWgMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBDbGllbnQg -Q2VydGlmaWNhdGUwHQYDVR0OBBYEFHEgLhfH0Z1QlLxeQbG7YZyBlz1MMB8GA1Ud -IwQYMBaAFFcL6csj6L9HPlB6P0V+oRhDnRUnMA4GA1UdDwEB/wQEAwIF4DAdBgNV -HSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwDQYJKoZIhvcNAQELBQADggIBAK9q -3YnEa99Cq3Vo3g6PE/A/xOE97seVNuavqyBcVv9PrTydb5XG4jPkYU3xOYXIc8rA -A4gzd+AsGO9rjGMPGGjQJI3JO/3BCeBJMkn50C/rM1yWVMHnVyFcJlg16xaWtj1e -2Jk8egJW2gSYyF+N2TdzI7tOb002GNr36posnqO+IOoLapyHFBxxUjsPDRoo8fJn -myWsV1Y9oRUZyJlfIAJsu85ew7gDBY2jaiEiopzour3uU3C0N7gYni2OmVwfr6J8 -R2/Jp43BSD5sYOW9RAJIEEXef+InYtz9HTJvKu2LsWwIBkaztk29tJcDE+1La6Sw -0dF0YkUwnXoGQFjiV+8pXX3TF5glXKj1rU8WfNazF6lqslB6DmdgR3/FQ6Z2sE86 -d9hVtayZIGlzU0rWmBBtr++7Wo88nmzAtd/xbZMFG8U//+Q2AvJT2oVGtqM48+al -rnsN/gYrLDr7RC14bHIuO1v6ZL/rAi7SPKrKYAyQVTAcRuW516SxxR6S1Xa1ITnh -rwgKg13eQuwu3iigguIS9XL6nAXabBIxBxMl6o2YlyIPekKYIcQmpqhkavJ6VOgX -iq9VdY6fIJVfmxNZuwM3/28k7UeUAfhI2SSVH4ZURbPiGGH2wukc1QkmtZ2cNa35 -C1y79aqJbIa3ErqLFPj/fM+34x8L7QHPq6RfaODa ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.key-pk8.pem b/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.key-pk8.pem deleted file mode 100644 index 2e4140b8fb392..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/superproxy.key-pk8.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDjdKfKz+HfDEn8 -gETNsl1AAoLFTih/kqBMPMWg5pLwLrc3BuODLSpxZh1ENx4/iUkgpIDX4ln5HWin -Kt9UgLYP2gMkUglMV9fL0b1VgNxzAA40+ABqa9HMGdhwS7x+oz46GV6RmW0qS0Ae -dfda0bdHLEQvmdjOUyUbBRcm7dHFz1KWRhLERHONYeKQaoaZctfaoHSTqjLFOsr7 -j4QeTeSFsQirJBlI4VsPs0x+dHLQJYW//1PWyGCMAgEJqR7qNjuXBpNLsm9st8bo -AF/nEczGhYtdrarBv4CvjcV/2DyQpnAU7Ex4ZzCTPIk8FtbK0XFpaf5HZEeFNpUI -ZzbYKN19AgMBAAECggEBAM1JAwuD1drmj3wKFI8F1S2pVndXFCwXnP9RthiDIckO -kKNkX0CMKgtQ20cu6+jyMgL5FaRCkWvJxCNkCU6OIENsQ3urYuL5QTWeZeBevhg4 -y5m43z8tcptgFD09zbEKCmaLcRO9wo3yfrs/QvE/58efxyajFs8YsZuSW5Px/msl -AFN8qGY0q0lQbQPK8cPYT4OnW9isqEjkvHq1Q+ryv4cxsGWtBZYmJVeLVHzClihi -lODdN6YsDcu9jLwsA8o2WSBRsbLHK6caW4FdEwgOLckc0EGGEU64VEB9ZOwgcwYQ -M9mZDs2w7qk63YnDH4KmpoP0Oc8N+bG7Pkifd7f8HqECgYEA8esMQya6Qln4O7Qo -aI8X9t5gCpq+bh/P+7vYdBR/9St5VtE/P0yuaQdT2e5t+Ei4ujqHvJ0GTPmqYIJf -2DNvJMu8EnXv+nqKQCsyMc/nqQN1CcRGqZssH1V/ZIQLRUA0cN4hzE0eHITwp6U9 -vD/WaE3mKX/XHthIjc8hnuoajOkCgYEA8LIYMgyD3wClWOKe5TkBumI0KvA9tP90 -IFHu1wLNl0tKBpsXkzzxiav1FMX3K7B29vxukrs946KRESHzRg4c5ULPnMmwcQyx -AortKJWrGVsna/QDWPRutXSN1XmnjKWsHmTpQQqLfaRvLW+Hd34+EVdVh9sVt/y9 -RucnBvxcX3UCgYBebm/U7pMaP2BkfcigN+sU1G0M9qaK+iQHkaXGehIQs62js/5K -STZzjQawNR/8IPbqytodR/YjqflVvs6G6FzkMhrx4dORJLA+qB3pz8wP72eKLnGe -1xF8EbWumNSFbbCKtkrfIuM0IriF2Dym9QxOnsnPPTXNtoNrx4TKMXu3sQKBgBq9 -noSI8WmoF7adTsvmnnOHj4YptKFUNCGXGLLYg+DII4xCVMct4SPLb+oD6Gb5Lu5X 
-sy0oEkMk/3roy68/yCQMXSZtHeYhY9UFfD2jCyRBBUswC+MpHNeaAFv0LRIqIcoq -qeNo+YBW8WcZ2fIDm3+vtTfntiz/rkOfUK2tAdI1AoGAL2LVDF5e1meSRocEoy8e -gqrshrhg+KBqYmcjtd4Iup4WvR6uyH4qE9yFLLHFZ04pZzXLHcPfqgOSP8qTxgH3 -h0uqtcYmejS/yl6PbC9OPXcSkMgntRkTbU9Ug05ijfN9NRnW+8WXvi8Z6DKed1P1 -9PxwA75P9o0h00mMk8PQitQ= ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/user1.cert.pem b/pulsar-broker/src/test/resources/authentication/tls-http/user1.cert.pem deleted file mode 100644 index 072f2867fe62b..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/user1.cert.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEZTCCAk2gAwIBAgICEAUwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v -YmFyMCAXDTE4MDYyNzA4NDAyNVoYDzIyOTIwNDExMDg0MDI1WjAQMQ4wDAYDVQQD -DAV1c2VyMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM5iqgr4PUUZ -AW9MDGP5cBaSJALSPV63m6M/IoovrMWJ9CGtcQfZTUHwDorIlXgQ6H/KufmsHW0Y -OQbChLSTDB14D0jSMtyv6+ibSoE1ZEl2SbB1miLd0P5AS9YmzzEW2+bx0zJORLYD -PzJ1Nh3/kQlRs04IECki291WZiVRzX2JRoL7kMtOAoKJqQfsT14Oi9EAw39VhLeB -uc/Mx6Jsutq/YdXakoZtQbfZka2MMfLXgMDLIPDqbU+09q7au2dq8RjGzrWnxnOX -o/XQssrIbwzJJYASBsgAAtnAw7bPzCX6+cL6PZVRyiEZov0HKXyRyvrbQ5hyEMuS -3dHqoKt0fKMCAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp -ZmljYXRlMB0GA1UdDgQWBBQ7NSD6lx6Vq38cEoD5l7FHs1Ej8DAfBgNVHSMEGDAW -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQC+cU7ctY7o -eTW+oHTq9EGJUMwW1fOww4QDrHtgZT4OYkO88zxQV2Cr050p8eaV5dHXZBf9/bRV -7hPNV5+HpQhb9TZ9xK2WRZ2QV7a/UDyUnksVKGSK9tNZMZPueOEB19e4bIBcgnQa -5i9sgZr93na7pFOY7lBQy6gfaOcnejYHmvVqIGaZBVH8rkEsGhhkJxy7qkpFNKgf -PGiIRo9L0WYqDCSiaICeCiteJwIfjsUFJKF0YnpXZq1kFfQscnleg60MWZAXvacp -tAciE51Ow60cqQWER66iwqnBSPD4l91SxAaGQAmalgCioGsYSbojXcOvRidhYJ2T -3YwCpqlC0qC9D2ZmNoukb1a0Pi03MuSJwD/8v9eqwEW9dFAzdnWDzTZMN9CfdjVh -2qiO5o5Si/X1Dmjdk2F/EM62YJQBAlkZBetFJ0o2QPGTSD+zrpfITIW8Pu+/5zcC -MZdzyUf0p1GO2Kn7wmqPQjz59zABagmxCNks8HeqPnzmWuADMaggb0nOmrBACE2x -b9XR6/xaXpwTRf0h5N3evivzUHo6XVw8A3gVUNoBm9Of3PlAsjM4I4SWFb6nrwYv -RnI04c+R95Su1fMc2wky0PmW+iWRTaEN/cUdX1SF6jo1nRLELGcbMSGUI6UI8kff -crvCz7uLu7Lr5/CKnEm2bSCZ4eIQpOs4nQ== ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls-http/user1.key-pk8.pem b/pulsar-broker/src/test/resources/authentication/tls-http/user1.key-pk8.pem deleted file mode 100644 index eec5c94c0665c..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls-http/user1.key-pk8.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOYqoK+D1FGQFv -TAxj+XAWkiQC0j1et5ujPyKKL6zFifQhrXEH2U1B8A6KyJV4EOh/yrn5rB1tGDkG -woS0kwwdeA9I0jLcr+vom0qBNWRJdkmwdZoi3dD+QEvWJs8xFtvm8dMyTkS2Az8y -dTYd/5EJUbNOCBApItvdVmYlUc19iUaC+5DLTgKCiakH7E9eDovRAMN/VYS3gbnP -zMeibLrav2HV2pKGbUG32ZGtjDHy14DAyyDw6m1PtPau2rtnavEYxs61p8Zzl6P1 -0LLKyG8MySWAEgbIAALZwMO2z8wl+vnC+j2VUcohGaL9Byl8kcr620OYchDLkt3R -6qCrdHyjAgMBAAECggEBAK8J8QvytAxJg/T/+7ZC1PTfp1kZNGGDuaV/o2ytuIul -T//MGQQ+IY8d6Ud9jX9SX84agxalCiP/mkYIbgK0gF7x94yccfTH433ZTxw8yzye -7SqS41JU7K7mmysaqTkKGSFK0gNlbFMud8f0rxxMJ5dOypMQtZwd63lSkLlwIqcn -YhKcAdXM4WGv07MFdwAXvrK2trhBBkCA08ZzyVy4lWE+cVfEkwH6O8EdGchl7g4U -LqIkYzufQWKdH9frt6N3qEV/qs0s1qipVzV07GaPpEdK/G5exJ7NjZaXJkHOuMbt -7ae1zJBexW41/++fjzsWSYljvSEphjqwrGYrr+tMGNECgYEA+Sg1sHcYO4Bg16qG -c8XM9g8DFL396X4MnMh+AFrDV8dbdz39x9fFtpKEfqaAZrG6I8fW3RkMLsJvJ4GI -KDJNz2ezEMbNKlXE9UzvAsrdvWNWDGJvrDu0CbJg2wtBeVEBIUYetSdNHoEwY7ZJ 
-QHnwbxYYrUFIJ34rI0SQ17/Z4vsCgYEA1A27jYTWr86JmvjQDUiJcJsbhpa/6OzZ -n3KTu0n0oCvl7uvvjUfhlzmcq7kyY0oO1C5TQHLiqBaI5hXw+iYwnESLIn7BwhMX -dcxglvXx95h1NqHYX9GnURl2QtrjmuY5yx+itfmZCRkRPO3c5e9uZyf01CeE4uwl -hDNh0RrSXHkCgYBvlQhmXQ+nJhk4vI+2LXFbCOISWfvqo562YDu9oOg22Xsm7cZH -x2QuHXPk3GBInXOFLqwVHHCOSFlLUgFOLykVp5VUABRFz1+Dk86+a2fetywEI9lr -QtmgNhiWQHY0BIkDA8ogytcIwEaRgUNQ8sswlK68eK39sc1T4BMV7D+CHQKBgG3g -+8lWBwSsKgOCYBQx/P27caTo4mJosE99yG0o4jhI5ulJmiSEFbINqVAWM7TdQBfU -NVFU9nuQybknr2l/dnrSzaG/Otk8mVBx6a7vnETm2/3GGV91PJS6c9wqnfu6xkGp -j99piVH8ikEfI/KFgZi0TJnOLH6FTN9W3J3EnzJJAoGAE4ZLLi+2RP4ZYXI11CJC -BSM1AEpqemgTUSidZyTiJJMGGrC7tyEba+4TIqeGgvD74p57XFbN7LOJWmnAiK8b -BLhQqPgOCXqrna00GnNKKx7AzgYHqlq03tGlX858aH18rkSRVk3UhkTkGTSr3iQn -aJeN/0HbpGr67bimf4bqlCY= ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls/broker-cert.pem b/pulsar-broker/src/test/resources/authentication/tls/broker-cert.pem deleted file mode 100644 index 8d0a02f24214f..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls/broker-cert.pem +++ /dev/null @@ -1,117 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 4098 (0x1002) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, L=Palo Alto, O=Apache Software Foundation, OU=Pulsar, CN=Pulsar CA/emailAddress=dev@pulsar.apache.org - Validity - Not Before: Feb 17 17:00:44 2021 GMT - Not After : Feb 12 17:00:44 2041 GMT - Subject: C=US, ST=California, O=Apache Software Foundation, OU=Pulsar, CN=localhost/emailAddress=dev@pulsar.apache.org - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:9b:2a:6f:24:02:23:f7:ff:e6:75:61:ca:07:a8: - c0:ab:e9:8d:eb:51:2e:64:f7:9e:9b:d4:b4:be:3a: - fa:f4:6e:c6:92:8f:38:4d:08:cd:89:15:3e:2c:c4: - 99:6d:cb:58:80:fc:e0:4d:d6:7d:f6:82:ab:0d:94: - f2:e2:45:c9:d3:15:95:57:0a:6c:86:dc:78:64:3b: - 34:4b:01:7c:5d:de:4f:d4:21:1a:5d:27:a0:a5:70: - 7a:2e:02:50:e1:19:b4:b9:05:df:99:0d:8b:cc:62: - dc:10:73:fa:72:8b:38:7f:d3:56:54:61:50:bb:92: - ff:09:71:09:c7:bd:04:43:3c:8c:9c:8b:32:d1:05: - 04:8a:c6:89:d8:78:56:4d:da:2f:f4:ec:34:37:26: - b5:87:e4:3f:26:c9:41:60:ba:31:10:19:be:f8:0c: - a4:0a:85:19:59:e2:00:5d:b7:c0:bd:d1:2e:fc:a6: - 34:8b:85:2a:cc:05:f6:fb:e4:00:e6:74:95:ff:02: - 6f:43:7f:39:a7:c2:83:8e:5b:38:40:c9:42:c8:bc: - 26:72:36:35:64:c2:54:22:11:87:e8:65:8f:3d:e9: - 41:a7:6d:19:88:9a:20:9b:9a:52:e7:d2:cb:b3:e0: - 2e:8f:c1:56:54:bc:6d:14:30:73:c5:d7:8e:d0:5a: - 5e:cd - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Cert Type: - SSL Server - Netscape Comment: - OpenSSL Generated Server Certificate - X509v3 Subject Key Identifier: - 49:3C:B2:98:30:CE:7F:79:7A:C6:8B:57:CA:24:9F:12:82:1E:5D:EF - X509v3 Authority Key Identifier: - keyid:D2:B2:3D:B1:A4:7C:48:4B:36:E1:A7:DE:D8:FC:BA:92:BA:A7:C4:71 - DirName:/C=US/ST=California/L=Palo Alto/O=Apache Software Foundation/OU=Pulsar/CN=Pulsar CA/emailAddress=dev@pulsar.apache.org - serial:52:7B:B4:00:96:60:B4:26:85:BE:01:82:B8:B8:E2:8C:72:EF:5B:90 - - X509v3 Key Usage: critical - Digital Signature, Key Encipherment - X509v3 Extended Key Usage: - TLS Web Server Authentication - Signature Algorithm: sha256WithRSAEncryption - 0f:bd:af:39:0c:2c:dc:8f:7e:06:0d:27:df:35:c7:8d:5a:03: - 68:97:f6:dc:d6:d3:39:0e:b4:76:48:7d:e1:1c:a9:4b:83:fa: - 52:00:ab:28:93:2d:06:76:0c:14:35:3c:f1:8e:3b:af:c8:d0: - 27:1f:58:d4:71:22:5f:05:a6:9e:73:c6:a5:5e:2a:e6:fb:eb: - fc:73:52:87:ca:8a:2a:f9:1e:5f:e2:b9:bd:01:27:9f:7c:61: - 
a6:97:ad:a0:ab:4e:fb:cc:fa:c8:77:6a:65:1b:ae:60:5e:fb: - 97:14:8c:40:d7:96:c6:2c:64:59:c0:52:52:7c:2d:98:4b:f4: - 72:da:83:f7:c6:4f:32:42:ce:df:02:dd:5f:eb:58:42:f9:62: - a1:9a:05:ef:13:48:27:af:a3:7f:23:eb:e0:dc:1d:8f:96:2a: - 88:47:f7:e4:75:6f:a9:15:f6:44:f1:6d:39:3a:2c:df:a7:82: - cc:7e:aa:9c:1c:c0:a7:7d:68:31:4a:4e:21:b8:9f:17:90:4b: - f1:68:23:ef:a7:53:fc:a9:a8:35:6b:8f:4c:5e:d4:ea:b0:8a: - 27:9a:86:89:ce:f2:5d:03:35:80:fc:45:e8:87:66:0f:32:b5: - 2a:f5:1b:79:0e:09:8b:90:40:20:fb:e3:27:8a:c9:92:c1:53: - 97:10:5a:8c:50:ef:02:46:7e:ec:68:c8:1e:26:66:0e:1d:d6: - 6c:82:e7:38:14:e8:cb:45:77:29:5f:2c:1a:9d:d7:54:21:8a: - cf:0f:b7:0c:ae:fe:d6:fb:fb:c3:07:3e:33:df:59:25:1c:73: - d4:87:73:14:b4:76:16:8a:3f:82:05:7b:42:0a:55:0c:79:24: - 3c:58:31:3f:e0:3e:9f:4e:d0:0e:fd:77:b7:13:2c:d3:d0:46: - cc:80:09:0f:50:56:8b:6e:6e:91:b2:5b:c8:2f:4d:86:dc:72: - 00:de:08:0d:5e:3e:96:1f:12:7d:3b:0d:4d:71:d5:c8:a8:06: - ba:00:23:ec:10:4c:a4:c3:6f:bc:f0:d7:b1:cf:57:3f:3b:79: - db:80:87:35:c7:4e:7f:bb:38:30:0a:9f:fe:5a:86:f5:97:ce: - 24:38:79:fd:a0:dc:0b:82:11:a1:ea:0c:e9:16:65:e0:c0:54: - 80:ad:6e:55:18:ac:27:35:3a:b0:20:70:62:8e:5d:a2:33:53: - 8c:ce:f9:ee:a1:27:cb:db:e5:9a:5e:e6:f7:80:93:84:63:04: - 26:58:ab:23:bb:94:80:d0:a0:55:a2:8a:ed:bc:0f:c3:41:d2: - 26:a5:b9:8d:8a:45:e8:a1:fc:e8:ee:7a:64:93:ed:d6:ef:a2: - 51:d7:c9:0a:31:39:35:4a ------BEGIN CERTIFICATE----- -MIIGPDCCBCSgAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwgaYxCzAJBgNVBAYTAlVT -MRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xIzAhBgNV -BAoMGkFwYWNoZSBTb2Z0d2FyZSBGb3VuZGF0aW9uMQ8wDQYDVQQLDAZQdWxzYXIx -EjAQBgNVBAMMCVB1bHNhciBDQTEkMCIGCSqGSIb3DQEJARYVZGV2QHB1bHNhci5h -cGFjaGUub3JnMB4XDTIxMDIxNzE3MDA0NFoXDTQxMDIxMjE3MDA0NFowgZIxCzAJ -BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMSMwIQYDVQQKDBpBcGFjaGUg -U29mdHdhcmUgRm91bmRhdGlvbjEPMA0GA1UECwwGUHVsc2FyMRIwEAYDVQQDDAls -b2NhbGhvc3QxJDAiBgkqhkiG9w0BCQEWFWRldkBwdWxzYXIuYXBhY2hlLm9yZzCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJsqbyQCI/f/5nVhygeowKvp -jetRLmT3npvUtL46+vRuxpKPOE0IzYkVPizEmW3LWID84E3WffaCqw2U8uJFydMV -lVcKbIbceGQ7NEsBfF3eT9QhGl0noKVwei4CUOEZtLkF35kNi8xi3BBz+nKLOH/T -VlRhULuS/wlxCce9BEM8jJyLMtEFBIrGidh4Vk3aL/TsNDcmtYfkPybJQWC6MRAZ -vvgMpAqFGVniAF23wL3RLvymNIuFKswF9vvkAOZ0lf8Cb0N/OafCg45bOEDJQsi8 -JnI2NWTCVCIRh+hljz3pQadtGYiaIJuaUufSy7PgLo/BVlS8bRQwc8XXjtBaXs0C -AwEAAaOCAYQwggGAMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCG -SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw -HQYDVR0OBBYEFEk8spgwzn95esaLV8oknxKCHl3vMIHmBgNVHSMEgd4wgduAFNKy -PbGkfEhLNuGn3tj8upK6p8RxoYGspIGpMIGmMQswCQYDVQQGEwJVUzETMBEGA1UE -CAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMSMwIQYDVQQKDBpBcGFj -aGUgU29mdHdhcmUgRm91bmRhdGlvbjEPMA0GA1UECwwGUHVsc2FyMRIwEAYDVQQD -DAlQdWxzYXIgQ0ExJDAiBgkqhkiG9w0BCQEWFWRldkBwdWxzYXIuYXBhY2hlLm9y -Z4IUUnu0AJZgtCaFvgGCuLjijHLvW5AwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQM -MAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4ICAQAPva85DCzcj34GDSffNceN -WgNol/bc1tM5DrR2SH3hHKlLg/pSAKsoky0GdgwUNTzxjjuvyNAnH1jUcSJfBaae -c8alXirm++v8c1KHyooq+R5f4rm9ASeffGGml62gq077zPrId2plG65gXvuXFIxA -15bGLGRZwFJSfC2YS/Ry2oP3xk8yQs7fAt1f61hC+WKhmgXvE0gnr6N/I+vg3B2P -liqIR/fkdW+pFfZE8W05Oizfp4LMfqqcHMCnfWgxSk4huJ8XkEvxaCPvp1P8qag1 -a49MXtTqsIonmoaJzvJdAzWA/EXoh2YPMrUq9Rt5DgmLkEAg++MnismSwVOXEFqM -UO8CRn7saMgeJmYOHdZsguc4FOjLRXcpXywanddUIYrPD7cMrv7W+/vDBz4z31kl -HHPUh3MUtHYWij+CBXtCClUMeSQ8WDE/4D6fTtAO/Xe3EyzT0EbMgAkPUFaLbm6R -slvIL02G3HIA3ggNXj6WHxJ9Ow1NcdXIqAa6ACPsEEykw2+88Nexz1c/O3nbgIc1 -x05/uzgwCp/+Wob1l84kOHn9oNwLghGh6gzpFmXgwFSArW5VGKwnNTqwIHBijl2i -M1OMzvnuoSfL2+WaXub3gJOEYwQmWKsju5SA0KBVoortvA/DQdImpbmNikXoofzo 
-7npkk+3W76JR18kKMTk1Sg== ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls/broker-key.pem b/pulsar-broker/src/test/resources/authentication/tls/broker-key.pem deleted file mode 100644 index ee03e754beee1..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls/broker-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAmypvJAIj9//mdWHKB6jAq+mN61EuZPeem9S0vjr69G7Gko84 -TQjNiRU+LMSZbctYgPzgTdZ99oKrDZTy4kXJ0xWVVwpshtx4ZDs0SwF8Xd5P1CEa -XSegpXB6LgJQ4Rm0uQXfmQ2LzGLcEHP6cos4f9NWVGFQu5L/CXEJx70EQzyMnIsy -0QUEisaJ2HhWTdov9Ow0Nya1h+Q/JslBYLoxEBm++AykCoUZWeIAXbfAvdEu/KY0 -i4UqzAX2++QA5nSV/wJvQ385p8KDjls4QMlCyLwmcjY1ZMJUIhGH6GWPPelBp20Z -iJogm5pS59LLs+Auj8FWVLxtFDBzxdeO0FpezQIDAQABAoIBAG9pk63mP49l1kM4 -eQjw2Y9WvslVXBuxVNiNbU4eKW1zUO+RGJrvlC027JLWg1g7pwvPBvu85GspPcsd -xRxFgfonyDhcSrq2+Vb2z8B/i54W73jgX/69YnMIBSKeFRbcD1C+7+MEv/l8jojd -zdmLL4FQ7O7fhUl57dgIqz4Y8UOYyyBsPpz3pzJLFEb5rE/ajqmFzyl+dO+8140B -niQ0+7+tAK0njX8OC0WN844GkO24WPCfWhUFrYGkfLq498eRUCWM2YP2tAJ+Uxnh -v3K9icDwOX6PJXYlbvNEUCE+t60NoDYHcMpfzUdFEhBYpKadfKE/RFFcu0vAZ+aR -y24oAuECgYEAyPLYXWIs88pPHQhSf2DAMRref5eeV+XA6Dy/P+z8z0bA7I6X9dl6 -AK6rRKGJl9HI7c/Gky6P10fymopYopNkClXm7SBTLKx0vfjil0U6Mx5ZsfDspE3q -0o9MJKVgobCxVZlLErU55XzktKwjlv2UvDX7VuxRndqN9qdf+YSMb9kCgYEAxayx -sOrJcPZVfy3Ohy5CeStF+E2dtfcKB7M7xZxZqykVy+6J1XjXHmp1L7Wpi0ju57Hi -l2ZqKasHDwtlLOnfSTbvC47hsa1ydnoFTjJBObR1wS43oVkyV0AHid4w81ddOWPC -H0ZmhvNe7pUxm5crpxsY6hAAraJ4Hej23MOxghUCgYEAip26UvCeQa2U1VogTm3X -Jgh641kbiVabs5fz9Yzs966+9m+Gs7jJSB81Vap415mHGUTyniTIZKDk4WX9rmgt -4lNPcNOTjIWKImHFLMQ8WXbeOLkRBGYbThQ7WiwadG8GZR3Rg54vyfZVbawxAL78 -ErjKIDP0OQfCVhsvQVgF6EECgYEAlQ2P+xA/Dv+gHkLjDUmTdBxuKToVZqU9merL -cklfz9EuD1Tx99ajltq9PFll25IGGw0mB/WAraS5sN1tz/0VkfZrL7LwefKIcc+2 -em0og6OQezcnWXGRpPqx9IJnNMY2lFSlhsGmA7I1bf9vpZvKnbmwAqZIbKUqn5sP -sg2ZprUCgYEApAVD+9wXfZE/YDHVZX1k6p38ORqjq/04AJkL/LmUW5DL5to1+1KQ -Q438HzMtYIq7aZyzWmlF6DmyN5mxKKK3yY79p0rvdV74AoT+ucDzM3ge0Md7liCs -0GwNnDSiPzdau738UoIKc1VbF7dMDL3LzqnfrBUCr7nXRbR3BHHuqws= ------END RSA PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/tls/cacert.pem b/pulsar-broker/src/test/resources/authentication/tls/cacert.pem deleted file mode 100644 index 6abfc2d80c123..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls/cacert.pem +++ /dev/null @@ -1,127 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: - 52:7b:b4:00:96:60:b4:26:85:be:01:82:b8:b8:e2:8c:72:ef:5b:90 - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, L=Palo Alto, O=Apache Software Foundation, OU=Pulsar, CN=Pulsar CA/emailAddress=dev@pulsar.apache.org - Validity - Not Before: Feb 17 16:43:44 2021 GMT - Not After : Feb 12 16:43:44 2041 GMT - Subject: C=US, ST=California, L=Palo Alto, O=Apache Software Foundation, OU=Pulsar, CN=Pulsar CA/emailAddress=dev@pulsar.apache.org - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (4096 bit) - Modulus: - 00:b1:3c:7d:ab:4a:54:72:37:2a:92:94:0a:66:46: - af:8c:ed:f4:2e:f3:87:1a:d0:c7:9d:23:35:1b:61: - 74:69:ca:f7:f5:3e:95:9c:86:f2:21:34:f8:0b:ed: - 45:76:22:ec:75:52:c0:67:db:2f:ba:da:25:3f:e1: - 5b:ac:da:15:dd:a5:75:24:b2:12:f0:b0:ce:fd:ab: - 44:06:a9:09:f6:b0:8e:8f:83:53:16:69:fa:9c:cc: - 00:fa:dd:13:f3:da:fd:f2:bf:88:8e:c4:f8:1a:6f: - ab:4d:f8:32:81:80:7e:51:7a:99:2d:94:cd:f3:5d: - 1c:58:b2:44:f1:96:12:46:56:bd:60:8f:65:32:b7: - d4:4b:7b:f3:23:88:2d:9b:a4:c4:c9:52:ea:9f:66: - c1:74:be:4b:91:c6:b9:57:ec:c1:cc:81:bb:03:d5: - 
fa:a0:46:4f:9a:a7:3e:3c:27:26:2b:97:eb:69:53: - 04:75:50:97:d6:0d:90:b1:37:9f:64:df:70:4d:d9: - b3:e3:b7:cc:76:50:d9:3c:9b:4c:ac:e9:26:2e:cf: - ac:47:42:14:b7:60:00:0a:de:42:47:66:0c:c7:7a: - b9:4d:f4:fb:c2:6a:45:78:ec:b0:b4:ce:b3:1f:50: - 25:96:13:0c:55:0a:e0:d6:76:f7:1f:e1:16:e6:41: - d6:72:6a:49:17:12:d9:05:8f:dc:56:b6:31:b3:b7: - 9c:e3:d8:a9:99:8a:1d:3b:9d:d9:59:44:ee:46:88: - 11:5f:ab:fa:38:a9:8b:d2:23:15:8b:af:1a:de:66: - ba:7d:51:95:37:94:91:aa:01:01:d7:83:19:4b:5d: - 8d:f4:18:39:ef:e3:32:d0:62:c8:12:50:4e:91:c2: - ac:58:73:68:bb:92:20:fc:14:e5:1a:86:bd:40:4c: - 94:e0:7d:0d:9c:08:57:ae:00:44:38:94:a3:3d:64: - 99:43:f8:e3:12:90:14:0f:5d:63:e2:c6:07:ea:d0: - 4c:8e:cf:e0:ae:34:be:86:4f:fc:58:e2:ea:f5:23: - 82:37:96:02:57:1b:b4:29:ca:fd:68:a0:48:79:e8: - 31:97:9a:5a:0e:2b:b4:b0:84:bb:57:4e:5f:4f:a7: - 43:45:97:d7:de:05:fc:2f:6c:3e:f5:53:26:56:a3: - a5:da:52:69:57:8e:a0:4b:27:50:f9:ad:6e:76:a6: - 29:cc:06:94:dd:d0:ac:c6:18:22:a0:e2:bb:ed:d5: - e4:97:f7:ac:23:df:75:30:41:97:07:3f:d3:12:8e: - c5:a4:ef:ce:40:e8:3b:57:24:19:33:1b:ee:8a:0e: - dd:0c:70:f2:1a:87:35:d9:71:d8:18:a7:9c:47:db: - 93:51:c3 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Key Identifier: - D2:B2:3D:B1:A4:7C:48:4B:36:E1:A7:DE:D8:FC:BA:92:BA:A7:C4:71 - X509v3 Authority Key Identifier: - keyid:D2:B2:3D:B1:A4:7C:48:4B:36:E1:A7:DE:D8:FC:BA:92:BA:A7:C4:71 - - X509v3 Basic Constraints: critical - CA:TRUE - X509v3 Key Usage: critical - Digital Signature, Certificate Sign, CRL Sign - Signature Algorithm: sha256WithRSAEncryption - 14:3d:7c:15:86:de:aa:5a:30:5d:d4:f2:bc:5f:10:d2:af:fe: - 91:d7:ee:f3:b8:5f:ce:e4:c9:b2:01:c3:16:da:66:8e:7e:b1: - c1:e3:30:ff:1d:73:d0:9c:20:3d:54:32:57:ae:07:80:4a:24: - 6e:7e:32:a3:e7:23:4d:5c:31:54:8b:c1:1b:c5:bc:20:5d:43: - 62:93:e0:2e:a7:01:77:39:cf:fd:ec:4c:57:09:4f:2b:ad:ac: - b6:c0:be:5a:a3:ea:12:ac:5a:7f:60:23:81:bb:9a:fa:5f:7a: - 67:a9:31:c3:34:af:db:ff:32:22:83:40:c2:7d:2f:39:5e:8a: - 29:44:73:5f:6e:b4:f4:a2:ae:60:1f:8e:ef:91:9a:49:bb:a6: - 90:2b:e0:44:95:24:8b:37:90:18:2d:41:32:8a:8e:07:8d:ea: - 75:62:b8:9c:ec:73:6f:12:54:23:6d:40:00:74:c7:d3:fb:b7: - 95:06:7d:cc:6d:8e:2c:d0:8b:11:06:8a:b7:43:1a:d7:e9:98: - f4:c6:ef:ad:2a:75:08:fb:07:8f:20:36:7a:86:1a:cf:f7:d6: - 96:ad:ed:71:59:d1:81:56:18:8d:98:c2:c0:44:e5:29:7a:7c: - c0:e3:d7:fb:b8:f5:b2:50:53:8a:cf:38:ff:99:aa:bb:28:51: - 60:e8:05:91:e1:ee:86:90:90:9b:87:60:63:38:cf:54:a5:82: - 74:0f:40:b5:d2:6a:c5:a9:98:22:59:4e:fb:a5:81:e2:7b:0e: - 3f:71:f3:24:17:1e:c5:89:fc:ae:ed:f3:69:65:02:b8:1e:98: - bc:37:c6:25:36:f8:ca:99:60:8e:13:3b:33:ec:91:b3:eb:04: - 6d:41:97:3e:35:c0:97:ed:66:12:25:44:23:f3:2e:fa:9c:2e: - c2:ba:dd:f3:63:d7:5b:b2:72:03:4d:3b:fb:5e:29:d6:5c:02: - 32:93:47:d1:4c:77:4a:58:c5:aa:81:ab:67:84:80:81:14:28: - e1:db:11:16:6d:31:50:7a:47:b2:a8:2d:15:a1:c4:63:1b:ce: - d5:e1:d7:57:dc:1a:71:e0:55:9f:6d:fb:be:e6:99:e8:89:be: - 2c:e0:19:5e:cd:02:79:52:ee:93:56:9f:dc:d7:de:31:9b:2a: - c8:91:48:a0:c7:44:7d:72:32:27:c3:2b:d8:e8:6b:94:67:b5: - 1d:9d:99:25:23:d9:24:b5:ed:4b:f2:18:2d:88:f5:d4:36:bb: - 53:8c:a8:b1:7f:05:13:d7:8d:89:9d:55:33:90:bc:60:99:cf: - 05:ba:bd:cb:c5:61:f9:c5:1a:f7:46:9c:40:90:dd:83:aa:7a: - 1f:ab:5c:10:8d:26:27:1e ------BEGIN CERTIFICATE----- -MIIGPzCCBCegAwIBAgIUUnu0AJZgtCaFvgGCuLjijHLvW5AwDQYJKoZIhvcNAQEL -BQAwgaYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQH -DAlQYWxvIEFsdG8xIzAhBgNVBAoMGkFwYWNoZSBTb2Z0d2FyZSBGb3VuZGF0aW9u -MQ8wDQYDVQQLDAZQdWxzYXIxEjAQBgNVBAMMCVB1bHNhciBDQTEkMCIGCSqGSIb3 -DQEJARYVZGV2QHB1bHNhci5hcGFjaGUub3JnMB4XDTIxMDIxNzE2NDM0NFoXDTQx 
-MDIxMjE2NDM0NFowgaYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh -MRIwEAYDVQQHDAlQYWxvIEFsdG8xIzAhBgNVBAoMGkFwYWNoZSBTb2Z0d2FyZSBG -b3VuZGF0aW9uMQ8wDQYDVQQLDAZQdWxzYXIxEjAQBgNVBAMMCVB1bHNhciBDQTEk -MCIGCSqGSIb3DQEJARYVZGV2QHB1bHNhci5hcGFjaGUub3JnMIICIjANBgkqhkiG -9w0BAQEFAAOCAg8AMIICCgKCAgEAsTx9q0pUcjcqkpQKZkavjO30LvOHGtDHnSM1 -G2F0acr39T6VnIbyITT4C+1FdiLsdVLAZ9svutolP+FbrNoV3aV1JLIS8LDO/atE -BqkJ9rCOj4NTFmn6nMwA+t0T89r98r+IjsT4Gm+rTfgygYB+UXqZLZTN810cWLJE -8ZYSRla9YI9lMrfUS3vzI4gtm6TEyVLqn2bBdL5Lkca5V+zBzIG7A9X6oEZPmqc+ -PCcmK5fraVMEdVCX1g2QsTefZN9wTdmz47fMdlDZPJtMrOkmLs+sR0IUt2AACt5C -R2YMx3q5TfT7wmpFeOywtM6zH1AllhMMVQrg1nb3H+EW5kHWcmpJFxLZBY/cVrYx -s7ec49ipmYodO53ZWUTuRogRX6v6OKmL0iMVi68a3ma6fVGVN5SRqgEB14MZS12N -9Bg57+My0GLIElBOkcKsWHNou5Ig/BTlGoa9QEyU4H0NnAhXrgBEOJSjPWSZQ/jj -EpAUD11j4sYH6tBMjs/grjS+hk/8WOLq9SOCN5YCVxu0Kcr9aKBIeegxl5paDiu0 -sIS7V05fT6dDRZfX3gX8L2w+9VMmVqOl2lJpV46gSydQ+a1udqYpzAaU3dCsxhgi -oOK77dXkl/esI991MEGXBz/TEo7FpO/OQOg7VyQZMxvuig7dDHDyGoc12XHYGKec -R9uTUcMCAwEAAaNjMGEwHQYDVR0OBBYEFNKyPbGkfEhLNuGn3tj8upK6p8RxMB8G -A1UdIwQYMBaAFNKyPbGkfEhLNuGn3tj8upK6p8RxMA8GA1UdEwEB/wQFMAMBAf8w -DgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQAUPXwVht6qWjBd1PK8 -XxDSr/6R1+7zuF/O5MmyAcMW2maOfrHB4zD/HXPQnCA9VDJXrgeASiRufjKj5yNN -XDFUi8EbxbwgXUNik+AupwF3Oc/97ExXCU8rray2wL5ao+oSrFp/YCOBu5r6X3pn -qTHDNK/b/zIig0DCfS85XoopRHNfbrT0oq5gH47vkZpJu6aQK+BElSSLN5AYLUEy -io4Hjep1Yric7HNvElQjbUAAdMfT+7eVBn3MbY4s0IsRBoq3QxrX6Zj0xu+tKnUI -+wePIDZ6hhrP99aWre1xWdGBVhiNmMLAROUpenzA49f7uPWyUFOKzzj/maq7KFFg -6AWR4e6GkJCbh2BjOM9UpYJ0D0C10mrFqZgiWU77pYHiew4/cfMkFx7Fifyu7fNp -ZQK4Hpi8N8YlNvjKmWCOEzsz7JGz6wRtQZc+NcCX7WYSJUQj8y76nC7Cut3zY9db -snIDTTv7XinWXAIyk0fRTHdKWMWqgatnhICBFCjh2xEWbTFQekeyqC0VocRjG87V -4ddX3Bpx4FWfbfu+5pnoib4s4BlezQJ5Uu6TVp/c194xmyrIkUigx0R9cjInwyvY -6GuUZ7UdnZklI9kkte1L8hgtiPXUNrtTjKixfwUT142JnVUzkLxgmc8Fur3LxWH5 -xRr3RpxAkN2Dqnofq1wQjSYnHg== ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls/client-cert.pem b/pulsar-broker/src/test/resources/authentication/tls/client-cert.pem deleted file mode 100644 index 45f3cde215fe5..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls/client-cert.pem +++ /dev/null @@ -1,90 +0,0 @@ -Certificate: - Data: - Version: 1 (0x0) - Serial Number: 4097 (0x1001) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, L=Palo Alto, O=Apache Software Foundation, OU=Pulsar, CN=Pulsar CA/emailAddress=dev@pulsar.apache.org - Validity - Not Before: Feb 17 16:56:55 2021 GMT - Not After : Feb 12 16:56:55 2041 GMT - Subject: C=US, ST=California, O=Apache Software Foundation, OU=Pulsar, CN=admin/emailAddress=dev@pulsar.apache.org - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:ab:61:f5:12:b1:e1:ae:19:01:3e:59:4a:c6:ca: - 00:0c:96:e8:76:3a:83:20:d9:af:3a:e1:11:20:12: - e0:e4:d0:70:8f:4b:7b:af:e1:89:ef:9b:c5:a9:c2: - ed:ae:24:8d:bb:42:6e:ec:59:11:3f:f5:63:59:61: - 18:9f:70:b6:76:88:e2:ca:79:15:cc:fb:9c:5e:5c: - bb:a1:d7:f0:d8:11:d4:17:34:1e:81:7e:0b:0e:05: - be:5d:fa:d6:46:af:e1:95:d8:a0:5d:c5:2f:d9:a9: - 8f:69:64:49:95:f7:42:16:6a:84:2b:2e:af:91:73: - 3d:b6:d4:44:56:9a:61:43:49:15:22:ae:90:5d:04: - 29:90:4e:b2:41:34:73:3e:a2:48:05:1c:bc:8e:1b: - 0b:c1:d5:df:56:32:40:e9:91:a2:7b:de:31:2b:67: - f1:8e:d6:c5:c0:87:57:70:29:f9:af:db:57:a0:2e: - 8c:30:0a:a7:47:39:33:4c:d7:2d:32:aa:48:29:bd: - c4:48:c5:58:52:07:c4:99:b1:cc:66:da:ac:28:4d: - c1:bc:1f:44:3f:a3:63:61:bd:ff:48:61:76:04:b2: - 7d:1c:6e:9c:ee:82:bb:f7:60:1c:7a:a0:98:be:2d: - 
70:43:2f:64:bf:d2:0f:20:25:f7:c7:7d:70:05:b8: - 2e:bf - Exponent: 65537 (0x10001) - Signature Algorithm: sha256WithRSAEncryption - 1c:31:b8:0f:a1:03:28:a0:da:31:ec:34:ce:e0:fd:01:99:9d: - 9b:ad:f8:03:5d:20:85:18:de:ca:b5:ea:61:c9:3b:65:42:9c: - e5:21:73:d2:06:41:4b:a9:3a:fb:7f:ff:45:f3:5a:4a:ab:5a: - 86:cd:57:6a:5f:13:c0:ae:7e:ad:5c:6e:c3:c4:e7:b7:d3:14: - bf:86:fe:f2:d1:70:0e:fc:98:50:a7:fe:53:62:5a:2d:f5:63: - 2c:ee:4a:7c:dd:32:3e:d1:52:3a:1f:15:38:4b:2a:4a:ee:27: - a9:d8:92:a8:33:92:83:c9:3a:09:5a:01:66:0e:68:da:8f:82: - c0:18:cc:78:ea:c5:db:09:7c:2f:61:c3:51:f8:58:7a:27:d7: - 92:c0:ff:f8:29:d7:a0:e9:54:17:8d:48:a8:ff:5e:92:ee:81: - 6c:37:90:1c:93:28:8c:d2:f5:b1:20:96:d3:1d:0f:c0:7f:db: - 0c:6d:65:7f:3a:55:e5:c9:9a:ad:09:91:a5:57:cb:fc:bf:df: - 69:bd:6b:87:94:5b:d0:cf:3b:8b:48:41:3d:56:b6:1d:3f:e7: - f6:b6:58:f7:54:2a:dd:da:60:68:db:9b:70:04:8b:19:c3:44: - bf:1d:b4:28:b9:f8:ea:ad:d3:1a:6e:64:72:b1:61:6a:f3:e1: - d4:68:56:7b:0e:ad:4c:53:1e:d2:2e:1c:bc:b7:82:59:af:65: - d2:fd:ef:89:7c:34:8f:51:a1:4e:9d:7e:dc:c7:97:68:ea:aa: - e5:67:ed:be:dc:38:74:0e:c3:6f:fd:08:62:54:d8:1f:15:d1: - 25:fc:21:f6:8c:f9:2f:65:5e:07:b9:e9:56:ba:48:14:5c:0d: - 18:ba:f8:83:54:5b:b6:27:0c:36:2c:20:29:9c:c2:68:c5:3a: - 0f:a5:d6:5f:7c:aa:f9:a6:2a:2b:69:c5:b1:39:e7:1c:02:31: - 5b:f5:82:de:c9:4e:8d:33:dc:94:02:44:0a:44:95:75:7b:a1: - e7:ee:92:fc:35:93:73:8c:22:c1:32:ea:39:17:ca:d0:87:fc: - 4d:8e:04:f8:59:66:d3:14:3f:59:ad:76:14:20:16:7b:77:4f: - 94:58:f8:85:5c:ba:b3:69:ed:7f:75:54:9a:1a:88:21:5d:04: - 57:87:85:e2:d4:0e:1b:61:7f:5d:36:dc:72:a1:9d:0b:c8:ce: - 19:69:49:fa:1b:bb:3f:3d:1b:4d:81:42:95:4e:d8:0b:04:d1: - 08:6d:15:b3:ae:52:41:12:ff:e1:90:c4:7d:52:88:55:8b:87: - 83:06:48:8b:fc:3a:a7:47:0e:6c:a8:4c:9e:b0:aa:da:50:f5: - 97:97:98:3e:9d:18:ef:43 ------BEGIN CERTIFICATE----- -MIIEqzCCApMCAhABMA0GCSqGSIb3DQEBCwUAMIGmMQswCQYDVQQGEwJVUzETMBEG -A1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMSMwIQYDVQQKDBpB -cGFjaGUgU29mdHdhcmUgRm91bmRhdGlvbjEPMA0GA1UECwwGUHVsc2FyMRIwEAYD -VQQDDAlQdWxzYXIgQ0ExJDAiBgkqhkiG9w0BCQEWFWRldkBwdWxzYXIuYXBhY2hl -Lm9yZzAeFw0yMTAyMTcxNjU2NTVaFw00MTAyMTIxNjU2NTVaMIGOMQswCQYDVQQG -EwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEjMCEGA1UECgwaQXBhY2hlIFNvZnR3 -YXJlIEZvdW5kYXRpb24xDzANBgNVBAsMBlB1bHNhcjEOMAwGA1UEAwwFYWRtaW4x -JDAiBgkqhkiG9w0BCQEWFWRldkBwdWxzYXIuYXBhY2hlLm9yZzCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKth9RKx4a4ZAT5ZSsbKAAyW6HY6gyDZrzrh -ESAS4OTQcI9Le6/hie+bxanC7a4kjbtCbuxZET/1Y1lhGJ9wtnaI4sp5Fcz7nF5c -u6HX8NgR1Bc0HoF+Cw4Fvl361kav4ZXYoF3FL9mpj2lkSZX3QhZqhCsur5FzPbbU -RFaaYUNJFSKukF0EKZBOskE0cz6iSAUcvI4bC8HV31YyQOmRonveMStn8Y7WxcCH -V3Ap+a/bV6AujDAKp0c5M0zXLTKqSCm9xEjFWFIHxJmxzGbarChNwbwfRD+jY2G9 -/0hhdgSyfRxunO6Cu/dgHHqgmL4tcEMvZL/SDyAl98d9cAW4Lr8CAwEAATANBgkq -hkiG9w0BAQsFAAOCAgEAHDG4D6EDKKDaMew0zuD9AZmdm634A10ghRjeyrXqYck7 -ZUKc5SFz0gZBS6k6+3//RfNaSqtahs1Xal8TwK5+rVxuw8Tnt9MUv4b+8tFwDvyY -UKf+U2JaLfVjLO5KfN0yPtFSOh8VOEsqSu4nqdiSqDOSg8k6CVoBZg5o2o+CwBjM -eOrF2wl8L2HDUfhYeifXksD/+CnXoOlUF41IqP9eku6BbDeQHJMojNL1sSCW0x0P -wH/bDG1lfzpV5cmarQmRpVfL/L/fab1rh5Rb0M87i0hBPVa2HT/n9rZY91Qq3dpg -aNubcASLGcNEvx20KLn46q3TGm5kcrFhavPh1GhWew6tTFMe0i4cvLeCWa9l0v3v -iXw0j1GhTp1+3MeXaOqq5Wftvtw4dA7Db/0IYlTYHxXRJfwh9oz5L2VeB7npVrpI -FFwNGLr4g1RbticMNiwgKZzCaMU6D6XWX3yq+aYqK2nFsTnnHAIxW/WC3slOjTPc -lAJECkSVdXuh5+6S/DWTc4wiwTLqORfK0If8TY4E+Flm0xQ/Wa12FCAWe3dPlFj4 -hVy6s2ntf3VUmhqIIV0EV4eF4tQOG2F/XTbccqGdC8jOGWlJ+hu7Pz0bTYFClU7Y -CwTRCG0Vs65SQRL/4ZDEfVKIVYuHgwZIi/w6p0cObKhMnrCq2lD1l5eYPp0Y70M= ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/authentication/tls/client-key.pem 
b/pulsar-broker/src/test/resources/authentication/tls/client-key.pem deleted file mode 100644 index e12697c966a9c..0000000000000 --- a/pulsar-broker/src/test/resources/authentication/tls/client-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEoQIBAAKCAQEAq2H1ErHhrhkBPllKxsoADJbodjqDINmvOuERIBLg5NBwj0t7 -r+GJ75vFqcLtriSNu0Ju7FkRP/VjWWEYn3C2dojiynkVzPucXly7odfw2BHUFzQe -gX4LDgW+XfrWRq/hldigXcUv2amPaWRJlfdCFmqEKy6vkXM9ttREVpphQ0kVIq6Q -XQQpkE6yQTRzPqJIBRy8jhsLwdXfVjJA6ZGie94xK2fxjtbFwIdXcCn5r9tXoC6M -MAqnRzkzTNctMqpIKb3ESMVYUgfEmbHMZtqsKE3BvB9EP6NjYb3/SGF2BLJ9HG6c -7oK792AceqCYvi1wQy9kv9IPICX3x31wBbguvwIDAQABAoIBAQCcwbSPrPRncaeZ -h8LFoO36le16dnqKCZIloMcxNxNNNvo9lyVC8mBgMXLSm+Eab4TTyyf6Nl14ytJc -ZltHOqkqMnp+B9LQ8zNLfDaDCijY+TWtI5bjio5B/S7qdwyXCzii/slv+3SQ+m6a -T4ifCtH//t11QfaEa4v/NphrPjnIeAgB681bk8nKdRop84ar+51lgbHoAza+wv+8 -e+aK3Od8r4yD19ZoPiMg0o4t2cEi8kupVgjsuZVtcvF9Q6QLYV17BFYEHqYjcr18 -N1EJ96f2FLO6cwEM+cG4n8gHjfDGRcDlhT9Cum1kDpg4J88auVUXnrDyi5Dcv1Pz -6EC+ZmXBAoGBAOHUSUDMkbEePKDaM3Z+4jLqZWc3UZhxQLnqg5l7phdQ6iSogQX9 -1LpZCJ+lOMTHBCnaTCoQpuSHgYgraVkD4KG6nzC423oDesd/xNvlfW3TRsmwZWbL -khdcdBSoVy3Kbv1v8kxw0NlcR68qo1XYfmFCAITcFHdxDz/jGStydlR9AoGBAMJH -gyPenL595X8t47R93rkGOIx5cVf5YrDIZCByp4K44Tf9OqZHbky7jSPSSbur10mI -pypRq5EcZ/cudU4w4gGaMauczt5Dgvlqd3T+GTZY3jO8bxi66gvzYTbigAxaJWcY -Uafiv5W9ldRKsY3pyCL8ubg38Ed2cSaS2wGd/SDrAn8NO2MPaO0gc6UZx688QjL+ -yL0oTxV42Snxusv7MkOJGjSd8UGeGEFeqdjXgdbRsNeNnDzaOh+NRGNSlziU/qUq -1MR/FlXF0G5hQhtGxyuSQ87iAnPukf79X21tyG9TP4lBUE3iLLoQAlgw606muQiu -qi9dmYeZeAZst+HBqfNFAoGADg6qmH/VC5uEbY1eeoLZCL5AfTmUT+9FitEVHZvu -LvE9qpVyFvH4Mykm7z6aAzBN5Y4zukYqiddqVmJQLpYu5DrJ+UbhWQe9hFqFxjtU -i7Amc8vgpgNwR+kWUahV547mQe1qiyFHB4iuPKwi6MfPqWhr775sbl9NlKLvodBS -rn0CgYBDrLH6ehNV/RnJIVZYQD6YcocYdYFy4u76mCYKmEP57XmstZHZXQgiRwbK -Oy2Yg/qieKtSMjstgHFK6ZNYIR37l9J9Lh9aeal61+wW2dsGEy29Rhg01FpvKReq -wCHz3tneUyaOhq9m0gKMOpWYcO+FBX1/2K5Gwj8FgEpu9r2b3w== ------END RSA PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/authentication/token/credentials_file.json b/pulsar-broker/src/test/resources/authentication/token/credentials_file.json index db1eccd8eb678..d12e786b7cdb5 100644 --- a/pulsar-broker/src/test/resources/authentication/token/credentials_file.json +++ b/pulsar-broker/src/test/resources/authentication/token/credentials_file.json @@ -1,4 +1,4 @@ { - "client_id":"Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x", - "client_secret":"rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb" + "client_id":"my-admin-role", + "client_secret":"super-secret-client-secret" } diff --git a/pulsar-broker/src/test/resources/certificate/client.crt b/pulsar-broker/src/test/resources/certificate/client.crt deleted file mode 100644 index 2d7d156866a86..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/client.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDVjCCAj4CCQCtw/UnTFDT7DANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MSEw -HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMMBmNsaWVu -dDAeFw0xNjA2MjAwMTQ1NDZaFw0yNjA2MTgwMTQ1NDZaMG0xCzAJBgNVBAYTAkFV -MRMwEQYDVQQIDApTb21lLVN0YXRlMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxITAf -BgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGY2xpZW50 -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqQV5F3Au9FWXIYPdWqiX -Rk5gdVmVkDuuFK4ZoOd8inoJpB3PPkpmpgoVkKQHDFhgx3ODGWIUgo+n6QDsJxY4 -ygHfVeggQgek8iUfteYVsIcHS0bjkhIij/3ihC301FkiqbrV069oLvUXLKcv3zxG -mdBAiz0k4xGZhFieVRvQCLY9syUUxmQ/3Cv42lDY8a1gTw4CRRx/hCfDvXCKhOT4 
-bMwUIDZfHB3JoDh3Thp8FLz0nTrRF75mSQJ/OdcafIm0Xoz2Otp/CSxLS+U1lLvG -05crWTDe0om7NW4mK4CqGCFq5gUw7eIzaeO7Q5Qez9XGTMzkgIDTMvNYGGEeJhhm -NQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAKXy4g6hljY5MpO8mbZh+uJHq6NEUs -4dr7OKDDWc39AROZsGf2eFUmHOjmRSw7VHpguGKI+rFRELVffpg/VvMh5apu+DBf -jhxtDNceAyh5uugPNUJHXyeikBDYW8bAzUU3DmMldPkTZWcGjurmyhDQ1TtK2YJe -RMFBXw5aAzdJMNi6OfXDH/ZX32hrb482yghDZj+ndnm0FefmLbFTQRMF8/fIHb1W -kqNHwIaapZwH6j/MJy/TRFYcJunrBUYT9zVjY46k3GU0ex/Bn7T4pg9gzgFGZJhn -jQQFKliIC84thCzdlPkrLduLY8tmlDKpLXatbEQ+s1MmNOURm6irPp6g ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/certificate/client.csr b/pulsar-broker/src/test/resources/certificate/client.csr deleted file mode 100644 index e01f33ef073f6..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/client.csr +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICsjCCAZoCAQAwbTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx -FTATBgNVBAcMDERlZmF1bHQgQ2l0eTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMQ8wDQYDVQQDDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQCpBXkXcC70VZchg91aqJdGTmB1WZWQO64Urhmg53yKegmkHc8+ -SmamChWQpAcMWGDHc4MZYhSCj6fpAOwnFjjKAd9V6CBCB6TyJR+15hWwhwdLRuOS -EiKP/eKELfTUWSKputXTr2gu9Rcspy/fPEaZ0ECLPSTjEZmEWJ5VG9AItj2zJRTG -ZD/cK/jaUNjxrWBPDgJFHH+EJ8O9cIqE5PhszBQgNl8cHcmgOHdOGnwUvPSdOtEX -vmZJAn851xp8ibRejPY62n8JLEtL5TWUu8bTlytZMN7Sibs1biYrgKoYIWrmBTDt -4jNp47tDlB7P1cZMzOSAgNMy81gYYR4mGGY1AgMBAAGgADANBgkqhkiG9w0BAQUF -AAOCAQEAk3eueaq/gonBzKH75oWHlqPbMZQFk4NXqx8h24ZfkCzPEFPyDM+jdQxv -8vDtyWq+fizqAQmGrM7WPHgnTbmZyovfmwuKwtTlkD/8t7XpTmm9fYspbL4WzdP1 -y8/Vug09te+rni+v+kjk5b9IceEy6kLvXuzirE6c4LunAm+thrr5gWmsx1pyDiq7 -W2M15UZrm/paaCg6cVaMFdXCRZP+g1P4NcgDUe2TyFbLlhOJNtX3DJRZWEhrkEYK -mRz2tJuiuitCzheAgRrFXepRagHKYffNSas1n/2kIc9QpZ8654kxsAzEwL7CnHd/ -SHbMS9dfP+uM6DACwcvngSOBMJ9KMg== ------END CERTIFICATE REQUEST----- diff --git a/pulsar-broker/src/test/resources/certificate/client.key b/pulsar-broker/src/test/resources/certificate/client.key deleted file mode 100644 index 34fc701c5257d..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/client.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCpBXkXcC70VZch -g91aqJdGTmB1WZWQO64Urhmg53yKegmkHc8+SmamChWQpAcMWGDHc4MZYhSCj6fp -AOwnFjjKAd9V6CBCB6TyJR+15hWwhwdLRuOSEiKP/eKELfTUWSKputXTr2gu9Rcs -py/fPEaZ0ECLPSTjEZmEWJ5VG9AItj2zJRTGZD/cK/jaUNjxrWBPDgJFHH+EJ8O9 -cIqE5PhszBQgNl8cHcmgOHdOGnwUvPSdOtEXvmZJAn851xp8ibRejPY62n8JLEtL -5TWUu8bTlytZMN7Sibs1biYrgKoYIWrmBTDt4jNp47tDlB7P1cZMzOSAgNMy81gY -YR4mGGY1AgMBAAECggEAcJj3yVhvv0/BhY8+CCYl2K1f7u1GCLbpSleNNTbhLbMM -9yrwo/OWnGg9Y4USOPQrTNOz81X2id+/oSZ/K67PGCvVJ3qi+rny9WkrzdbAfkAF -6O0Jr4arRbeBjkK7Rjc3M1EHH6VLx3R5AsNBzfpuogss5FVQXICd/5+1oscLeLEx -/Fn+51IEn9FUg5vr7ElG51f+zPxexcWHLNoqGjTEIGGtI8/CfTzD9tBV4sIjf/Nc -Zzfs9XYrChfcrS0U1zDa+L7c5gYfoN6M08sBiuZlhyyO9wgzPlp+XnsrSFv6hUta -0scjAbN4bh+orQn6zgFN/sjkQnraWXW7pKFLyTR/IQKBgQDVju4IbhE9XRweNgXi -s3BuGV+HsuFffEf0904/zCuCUcScGb5WCz5+KtlFJ//YxfocHVZajH+4GdCGbWim -m+H3XvRpWgfK/aBNOXu5ueLbnPYyPjTrcpKRsomeoiV+Jz1tv5PQElwzCiCzVvQf -fMyhQT16YIsFQAGJzQMBEHWODQKBgQDKnKps3sKSR3ycUtIxCVXUir7p52qst0Pm -bPO8JrcRKZP2z8MJB96+DcQFzrxj7t5DDktkYEsFOPPuIeUsYXsY+MKHs4hEQVCz -hpDJJNQ8s+SV8TLzKpinZEmLIjslLbn2rQrpqybPg84VxqX3qqM8IrXhMf77aGj6 -QHqvQwHWyQKBgQDF1RVO+9++j82ncvY6z22coKath5leIjxqgtqbISFBJUxUK0j2 -Xo4yxLDnbqmE/8m1V7wSP8tlGYzhquLiTM+kn/Mc0Ukc0503TMQABmJQfXRYkOXn -IwkCLXltWdoPpnwyeeGNRCTjJ0OpvyiBLtRFobE498xxPZzvMdrRlpS/1QKBgQCo -wmMleUnBQ2/kWQugMnFeLg6kjs+IesFAnYFKN0kGL4aB7j06OWbrEFY0rCS4bA6O 
-9coQGjCCchSjRXI4TB2XCCQnmX8nsuuADNZt45Iv2XrM9XEFn3Y0/tBO5j0zU2nw -r+NGC/uwns050BMPPf7mqNarctQ6HZZK0wgdEQfoGQKBgC+pbkQv9cn68TsiaJ3w -tvNRTXCIAAH4Vtn9Cp+63ao+kXn94BJqQF99i58kJpG4ol6wbCHUoC6fHgxUh5HB -JB0HjC2eCMgn4acAQg0sPW6l35KX36yYxtrL7eosB/yBYum0XAwmboNjEhlCZkOs -YOpSsn61g7xqqrt40Spb5vUn ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/certificate/server.crt b/pulsar-broker/src/test/resources/certificate/server.crt deleted file mode 100644 index 59b651be2a406..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/server.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDLjCCAhYCCQDn/Yvym+FMsDANBgkqhkiG9w0BAQUFADBZMQswCQYDVQQGEwJB -VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTYwNjEzMjIyMTQ2WhcN -MjYwNjExMjIyMTQ2WjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 -ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQDEwls -b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs29IuzZvk -OGUkS/wqKzd/h2esqjCSjw4SLLbeh1GA3UEvh1k9+eRiYwJG1yCOHmcsp4A8Du99 -8xbgeihpWWw7pjL5VVky3ciuvHyz1Cc6bKRps/GzVJBwFP0gzHnK8bUM86U52yGT -1DepD/Y2lURy0igdVcAMjGweMwoTmiaVcwZexfYuEef+jz3fmpmOwP9rboIA9rQr -mTbLzzkbAwZXdl+bRvIefIjIazIzTOs8tJWrhFaTJUgBhhLjFIwTdpS+n+FqOu8J -92K+PvKjIeJ3kmnZyRHK7uidlAn0g/DK+co1sX3zORPCWeg21K+/vVHTj91zARNb -O9hVS4bqqsw9AgMBAAEwDQYJKoZIhvcNAQEFBQADggEBACE0WBuTbHcPtYKv2ZMS -mYk9jvtAhmWHQ6tNqV8CmS2AsrzZdWglGaqIRsm5slkD2BGeQS+BesTArUuENTmP -r9kJSecdiiB8aWtLbhoCSH3QR6IW/b5UVl6sR5OIh7SkNTjMSUSDnMEVLNGyKZGS -gCGVbDf3n5KhOTnwqguELRykynKFt2LVksBia9+88lUtiRHpbyClo/KVWltJlaww -PT0WEpwqVUcHmwrR3MTzJDEPvIplSgxdaDmFGYS1YKm9T/wQd+t/0DbXMmfJXBbd -FVUnB6o7qJVU9N2Tbaj9NbCtwz5nTZG4A5kRXWHVjZsn5WzLuS/me3rDXjwlfB2p -ipY= ------END CERTIFICATE----- diff --git a/pulsar-broker/src/test/resources/certificate/server.csr b/pulsar-broker/src/test/resources/certificate/server.csr deleted file mode 100644 index 8782222c5ab46..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/server.csr +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICnjCCAYYCAQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUx -ITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAxMJbG9j -YWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArNvSLs2b5Dhl -JEv8Kis3f4dnrKowko8OEiy23odRgN1BL4dZPfnkYmMCRtcgjh5nLKeAPA7vffMW -4HooaVlsO6Yy+VVZMt3Irrx8s9QnOmykabPxs1SQcBT9IMx5yvG1DPOlOdshk9Q3 -qQ/2NpVEctIoHVXADIxsHjMKE5omlXMGXsX2LhHn/o8935qZjsD/a26CAPa0K5k2 -y885GwMGV3Zfm0byHnyIyGsyM0zrPLSVq4RWkyVIAYYS4xSME3aUvp/hajrvCfdi -vj7yoyHid5Jp2ckRyu7onZQJ9IPwyvnKNbF98zkTwlnoNtSvv71R04/dcwETWzvY -VUuG6qrMPQIDAQABoAAwDQYJKoZIhvcNAQEFBQADggEBAEPHySnpf3E/7tZsiDka -rqdB/sU7fdqjyV0iy0cuKQkU8WYrsE7bHkqMYc8CiIDfWhIGW5Jnzups2O6eH0Sx -2BS21ARFiNGC1UfY1HSV2zrTNh3RqQa3YsXzv9vvdQ/gjsqGDuGDIc1yAA+Ytdja -3rhIzEVqBhiLzg+M2+gW1zs+Kqj0Zo0pLB2uqhdZJmjxBb2FCli50vCVEhqIS3RO -KTE+AJfxThWIeahFyVaskaEGkS6NVr2JihV0elbKolH19k2UzRTVn7p3Ixh5ojuW -gtU/90vOy/SDkSRmCWMqgkUKJ2oeImleHdrvwNyrzvrLWRAz6R5yGQJwji9kKpHD -FK0= ------END CERTIFICATE REQUEST----- diff --git a/pulsar-broker/src/test/resources/certificate/server.key b/pulsar-broker/src/test/resources/certificate/server.key deleted file mode 100644 index 6da70f5aec3b5..0000000000000 --- a/pulsar-broker/src/test/resources/certificate/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCs29IuzZvkOGUk -S/wqKzd/h2esqjCSjw4SLLbeh1GA3UEvh1k9+eRiYwJG1yCOHmcsp4A8Du998xbg 
-eihpWWw7pjL5VVky3ciuvHyz1Cc6bKRps/GzVJBwFP0gzHnK8bUM86U52yGT1Dep -D/Y2lURy0igdVcAMjGweMwoTmiaVcwZexfYuEef+jz3fmpmOwP9rboIA9rQrmTbL -zzkbAwZXdl+bRvIefIjIazIzTOs8tJWrhFaTJUgBhhLjFIwTdpS+n+FqOu8J92K+ -PvKjIeJ3kmnZyRHK7uidlAn0g/DK+co1sX3zORPCWeg21K+/vVHTj91zARNbO9hV -S4bqqsw9AgMBAAECggEAd/LuDeZFZ/+uR5qmuAhXMZqfWZSbsges5vW6S/6wkvB1 -vGp6heQzFAbKXKgJgjUcuULeXE6s58RYuppqEnin/1hcBOKxy/dUu9Q14H+2XPdo -u6TPcvaaZ/xYjnr1hNtnHD6yB8zEpxVbLmjSHJxF7Dti9MA9TTfgCrC2LFYKsicD -/5AQyHuwpHyTL3Iiwv4Qtks/SD2a3fu8lD0yTQwA/hY6/0ieXxXd9tZV5a6GSA0P -nieol1byfuX7Q5fb8ggPd9u9K1mVZTBRKiE5w+uU4Ic2IkBmZX5ZuRS+vFplpLsY -YpFPvzFmpNkpK2SdYjJ+V4tkJsFHmOaFRgW/0QB2DQKBgQDeQMSZBQlPUrgRdWHN -OyvTcrSvXzg5DbaIj39tgdNZ6PYns/thD0n707KGRJOChIyYiiKxLxzLWdPUxqQO -rNLUV9IkMVc/QZR8RUqGc2BxmPOxAprhzeOhLsyqP/sgtxRHAnLqmkXuHYoxvTZ6 -LFCRCZBpEJrutGxl3s/x+sfkuwKBgQDHGwnSmvArpL8ZY1dV4xKNkxifCBnNmqAl -TKHPW3odN9nkMECEt1XUIioUUKXUsiAZNp5xa/v1DEyJ4f2T20QKcAGbS18b1M5W -axIoH3IhyLo74tuo0fthgq5bzypfFOlIjo7F9mpEky/461RWmoNAAlp9+FkDi48C -KwjAk39/ZwKBgQDXFJqs8sDFsOlMi+nvsHmDERhmNqG0JN8mXKgWk3KzKc09MuHs -Vd1lBMNZSHfv8NIWtGdKTKty5yUmXm1ZfkoxECPevpkOMCq/8FZksrb8d+YswLae -Gp9U1nNdtrkSOdo3tdj7y/wsqQ2ZgOB9bvEwyq6j3lvw8U2NcAiQxf44DQKBgBHb -lPf0uZHQhutKA61KXoGgLdclrNrKAY8W3nRwqfUw6zQSN9cvcl1Cay/DQ/xdtY9N -XMyjeMezwLGlOU8nnWSqQxqgmfkvDwqlM82xdFUfYcS5RiZQHxHR3L2TSSOaBoph -buDGhyV7ZhQXV0slNJxrGZ6uxZ0RyVPSdEiBcjAFAoGBAJqZ6uCVHpv/FwZVggu7 -Xb9EIxZnLSmXwaXFpJoMZpRpKb8cSTTJbgSMv3Dq2LcNKYXdNBhgKgPSc/XipXt9 -ZdT36KWipV+PzW691kUiWHtA8/+E0LCi4Y7rlcBMz9PgDNXK4XMMZOVKxDqPcHSJ -P6y01ku7T2X+abUiJ334Hg6G ------END PRIVATE KEY----- diff --git a/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf b/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf index d9411e655ad5b..4e2fd40298354 100644 --- a/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf +++ b/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf @@ -29,4 +29,5 @@ authenticationEnabled=true authenticationProviders=org.apache.pulsar.MockTokenAuthenticationProvider brokerClientAuthenticationPlugin= brokerClientAuthenticationParameters= -loadBalancerOverrideBrokerNicSpeedGbps=2 \ No newline at end of file +loadBalancerOverrideBrokerNicSpeedGbps=2 +topicLevelPoliciesEnabled=false \ No newline at end of file diff --git a/pulsar-client-1x-base/pom.xml b/pulsar-client-1x-base/pom.xml index f039c23f92cb3..ebfc492578599 100644 --- a/pulsar-client-1x-base/pom.xml +++ b/pulsar-client-1x-base/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client-1x-base Pulsar Client 1.x Compatibility Base pom - pulsar-client-2x-shaded pulsar-client-1x - @@ -61,7 +57,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -77,5 +72,4 @@ - diff --git a/pulsar-client-1x-base/pulsar-client-1x/pom.xml b/pulsar-client-1x-base/pulsar-client-1x/pom.xml index a0a254317bfc8..571e7cfebb96e 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/pom.xml +++ b/pulsar-client-1x-base/pulsar-client-1x/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-client-1x-base 3.0.0-SNAPSHOT .. 
- pulsar-client-1x Pulsar Client 1.x Compatibility API - ${project.groupId} pulsar-client-2x-shaded ${project.version} - com.google.guava guava - org.apache.commons commons-lang3 - - @@ -70,7 +63,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -90,5 +82,4 @@ - diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java index 3b0efe64cf588..ea2bba166e6c5 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java @@ -368,6 +368,7 @@ public ClientConfiguration setServiceUrl(String serviceUrl) { * @param unit the time unit in which the duration is defined */ public void setConnectionTimeout(int duration, TimeUnit unit) { + checkArgument(duration >= 0); confData.setConnectionTimeoutMs((int) unit.toMillis(duration)); } diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java index 81956db56f774..f2101b287043c 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java @@ -69,6 +69,7 @@ public long getAckTimeoutMillis() { * @return {@link ConsumerConfiguration} */ public ConsumerConfiguration setAckTimeout(long ackTimeout, TimeUnit timeUnit) { + checkArgument(ackTimeout >= 0); long ackTimeoutMillis = timeUnit.toMillis(ackTimeout); checkArgument(ackTimeoutMillis >= minAckTimeoutMillis, "Ack timeout should be should be greater than " + minAckTimeoutMillis + " ms"); diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/Reader.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/Reader.java index 451b9fa638203..98fcdb453bb76 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/Reader.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/Reader.java @@ -36,14 +36,14 @@ public interface Reader extends Closeable { /** * Read the next message in the topic. * - * @return the next messasge + * @return the next message * @throws PulsarClientException */ Message readNext() throws PulsarClientException; /** * Read the next message in the topic waiting for a maximum of timeout - * time units. Returns null if no message is recieved in that time. + * time units. Returns null if no message is received in that time. * * @return the next message(Could be null if none received in time) * @throws PulsarClientException diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml b/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml index 7938e60bf4330..d191e191d52ca 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-client-1x-base 3.0.0-SNAPSHOT .. 
- pulsar-client-2x-shaded Pulsar Client 2.x Shaded API - ${project.groupId} @@ -39,10 +36,9 @@ ${project.version} - - + org.apache.maven.plugins maven-shade-plugin @@ -56,16 +52,15 @@ true true false - - org.apache.pulsar:pulsar-client - org.apache.pulsar:pulsar-client-api + io.streamnative:pulsar-client + io.streamnative:pulsar-client-api - org.apache.pulsar:pulsar-client + io.streamnative:pulsar-client ** @@ -93,6 +88,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/pulsar-client-admin-api/pom.xml b/pulsar-client-admin-api/pom.xml index 69c423e86e246..f90b1a21bc439 100644 --- a/pulsar-client-admin-api/pom.xml +++ b/pulsar-client-admin-api/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - pulsar - 3.0.0-SNAPSHOT - .. - - - pulsar-client-admin-api - Pulsar Client Admin :: API - - - - ${project.groupId} - pulsar-client-api - ${project.version} - - - - org.slf4j - slf4j-api - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar + 3.0.0-SNAPSHOT + .. + + pulsar-client-admin-api + Pulsar Client Admin :: API + + + ${project.groupId} + pulsar-client-api + ${project.version} + + + org.slf4j + slf4j-api + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java index 464d02121cfc2..dc0b7c9885a9a 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java @@ -35,7 +35,7 @@ public interface Brokers { /** * Get the list of active brokers in the local cluster. *

- * Get the list of active brokers (web service addresses) in the local cluster. + * Get the list of active brokers (broker ids) in the local cluster. *

* Response Example: * @@ -44,7 +44,7 @@ public interface Brokers { * * * "prod1-broker3.messaging.use.example.com:8080"] * * - * @return a list of (host:port) + * @return a list of broker ids * @throws NotAuthorizedException * You don't have admin permission to get the list of active brokers in the cluster * @throws PulsarAdminException @@ -55,7 +55,7 @@ public interface Brokers { /** * Get the list of active brokers in the local cluster asynchronously. *

- * Get the list of active brokers (web service addresses) in the local cluster. + * Get the list of active brokers (broker ids) in the local cluster. *

* Response Example: * @@ -64,13 +64,13 @@ public interface Brokers { * "prod1-broker3.messaging.use.example.com:8080"] * * - * @return a list of (host:port) + * @return a list of broker ids */ CompletableFuture> getActiveBrokersAsync(); /** * Get the list of active brokers in the cluster. *

- * Get the list of active brokers (web service addresses) in the cluster. + * Get the list of active brokers (broker ids) in the cluster. *

* Response Example: * @@ -81,7 +81,7 @@ public interface Brokers { * * @param cluster * Cluster name - * @return a list of (host:port) + * @return a list of broker ids * @throws NotAuthorizedException * You don't have admin permission to get the list of active brokers in the cluster * @throws NotFoundException @@ -94,7 +94,7 @@ public interface Brokers { /** * Get the list of active brokers in the cluster asynchronously. *

- * Get the list of active brokers (web service addresses) in the cluster. + * Get the list of active brokers (broker ids) in the cluster. *

* Response Example: * @@ -105,7 +105,7 @@ public interface Brokers { * * @param cluster * Cluster name - * @return a list of (host:port) + * @return a list of broker ids */ CompletableFuture> getActiveBrokersAsync(String cluster); @@ -156,11 +156,11 @@ public interface Brokers { * * * @param cluster - * @param brokerUrl + * @param brokerId * @return * @throws PulsarAdminException */ - Map getOwnedNamespaces(String cluster, String brokerUrl) + Map getOwnedNamespaces(String cluster, String brokerId) throws PulsarAdminException; /** * @@ -176,10 +176,10 @@ Map getOwnedNamespaces(String cluster, String * * * @param cluster - * @param brokerUrl + * @param brokerId * @return */ - CompletableFuture> getOwnedNamespacesAsync(String cluster, String brokerUrl); + CompletableFuture> getOwnedNamespacesAsync(String cluster, String brokerId); /** * Update a dynamic configuration value into ZooKeeper. @@ -326,10 +326,11 @@ Map getOwnedNamespaces(String cluster, String CompletableFuture healthcheckAsync(TopicVersion topicVersion); /** - * Shutdown current broker gracefully. - * @param maxConcurrentUnloadPerSec - * @param forcedTerminateTopic - * @return + * Trigger a graceful shutdown of the current broker asynchronously. + * + * @param maxConcurrentUnloadPerSec the maximum number of topics to unload per second. + * This helps control the speed of the unload operation during shutdown. + * @param forcedTerminateTopic if true, topics will be forcefully terminated during the shutdown process. */ CompletableFuture shutDownBrokerGracefully(int maxConcurrentUnloadPerSec, boolean forcedTerminateTopic); diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java index f599e2566bffc..156d67e4e58b3 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Topics.java @@ -1361,6 +1361,8 @@ default PartitionedTopicStats getPartitionedStats(String topic, boolean perParti * Set to true to get precise backlog, Otherwise get imprecise backlog. * @param subscriptionBacklogSize * Whether to get backlog size for each subscription. + * @param getEarliestTimeInBacklog + * Whether to get the earliest time in backlog. 
* @return a future that can be used to track when the partitioned topic statistics are returned */ CompletableFuture getPartitionedStatsAsync( diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java index 8955fe7a0ac78..19e9ff2d15a2b 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java @@ -25,9 +25,11 @@ */ public interface BrokerInfo { String getServiceUrl(); + String getBrokerId(); interface Builder { Builder serviceUrl(String serviceUrl); + Builder brokerId(String brokerId); BrokerInfo build(); } diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java index e4d0a68b50ad0..d77f693c7cd70 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java @@ -31,6 +31,7 @@ @NoArgsConstructor public final class BrokerInfoImpl implements BrokerInfo { private String serviceUrl; + private String brokerId; public static BrokerInfoImplBuilder builder() { return new BrokerInfoImplBuilder(); @@ -38,14 +39,20 @@ public static BrokerInfoImplBuilder builder() { public static class BrokerInfoImplBuilder implements BrokerInfo.Builder { private String serviceUrl; + private String brokerId; public BrokerInfoImplBuilder serviceUrl(String serviceUrl) { this.serviceUrl = serviceUrl; return this; } + public BrokerInfoImplBuilder brokerId(String brokerId) { + this.brokerId = brokerId; + return this; + } + public BrokerInfoImpl build() { - return new BrokerInfoImpl(serviceUrl); + return new BrokerInfoImpl(serviceUrl, brokerId); } } } diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/protocol/schema/IsCompatibilityResponse.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/protocol/schema/IsCompatibilityResponse.java index d5edd96fab563..aa8b54d1a77be 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/protocol/schema/IsCompatibilityResponse.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/protocol/schema/IsCompatibilityResponse.java @@ -33,5 +33,4 @@ public class IsCompatibilityResponse { boolean isCompatibility; String schemaCompatibilityStrategy; - } diff --git a/pulsar-client-admin-shaded/pom.xml b/pulsar-client-admin-shaded/pom.xml index cf669b5869afb..80df41d833f3d 100644 --- a/pulsar-client-admin-shaded/pom.xml +++ b/pulsar-client-admin-shaded/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-admin Pulsar Client Admin - ${project.groupId} @@ -74,7 +70,6 @@ - maven-antrun-plugin @@ -87,15 +82,12 @@ - + - org.apache.maven.plugins maven-shade-plugin @@ -108,11 +100,10 @@ true true - - org.apache.pulsar:pulsar-client-original - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-original + io.streamnative:pulsar-client-admin-original org.apache.commons:commons-lang3 commons-codec:commons-codec commons-collections:commons-collections @@ -126,11 +117,12 @@ com.fasterxml.jackson.*:* io.netty:* io.netty.incubator:* - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common org.apache.bookkeeper:* com.yahoo.datasketches:sketches-core org.glassfish.jersey*:* javax.ws.rs:* + javax.xml.bind:jaxb-api jakarta.annotation:* org.glassfish.hk2*:* io.grpc:* @@ -147,7 +139,7 @@ org.yaml:snakeyaml io.swagger:* - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -155,7 +147,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -165,7 +157,7 @@ - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-admin-original ** @@ -176,7 +168,7 @@ - + org.asynchttpclient org.apache.pulsar.shade.org.asynchttpclient @@ -230,6 +222,10 @@ javax.annotation org.apache.pulsar.shade.javax.annotation + + javax.xml.bind + org.apache.pulsar.shade.javax.xml.bind + jersey org.apache.pulsar.shade.jersey @@ -254,51 +250,75 @@ org.reactivestreams org.apache.pulsar.shade.org.reactivestreams - - io.grpc - org.apache.pulsar.shade.io.grpc - - - okio - org.apache.pulsar.shade.okio - - - com.squareup - org.apache.pulsar.shade.com.squareup - - - io.opencensus - org.apache.pulsar.shade.io.opencensus - - - org.eclipse.jetty - org.apache.pulsar.shade.org.eclipse.jetty - - - org.objenesis - org.apache.pulsar.shade.org.objenesis - - - org.yaml - org.apache.pulsar.shade.org.yaml - - - io.swagger - org.apache.pulsar.shade.io.swagger - - - org.apache.bookkeeper - org.apache.pulsar.shade.org.apache.bookkeeper - + + io.grpc + org.apache.pulsar.shade.io.grpc + + + okio + org.apache.pulsar.shade.okio + + + com.squareup + org.apache.pulsar.shade.com.squareup + + + io.opencensus + org.apache.pulsar.shade.io.opencensus + + + org.eclipse.jetty + org.apache.pulsar.shade.org.eclipse.jetty + + + org.objenesis + org.apache.pulsar.shade.org.objenesis + + + org.yaml + org.apache.pulsar.shade.org.yaml + + + io.swagger + org.apache.pulsar.shade.io.swagger + + + org.apache.bookkeeper + org.apache.pulsar.shade.org.apache.bookkeeper + - - + + + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/pulsar-client-admin/pom.xml b/pulsar-client-admin/pom.xml index 9436f9c1ce245..c601934505200 100644 --- a/pulsar-client-admin/pom.xml +++ b/pulsar-client-admin/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-admin-original Pulsar Client Admin Original - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-admin-api ${project.version} - org.glassfish.jersey.core jersey-client - org.glassfish.jersey.media jersey-media-json-jackson - jakarta.activation jakarta.activation-api - org.glassfish.jersey.media jersey-media-multipart - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - javax.xml.bind jaxb-api @@ -91,30 +79,25 @@ javax.activation runtime - com.google.guava guava - com.google.code.gson gson - ${project.groupId} pulsar-package-core ${project.version} - org.hamcrest hamcrest test - @@ -141,7 +124,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java index 0e6296724b3da..7b4ebb1778d8e 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java @@ -75,15 +75,15 @@ public CompletableFuture getLeaderBrokerAsync() { } @Override - public Map getOwnedNamespaces(String cluster, String brokerUrl) + public Map getOwnedNamespaces(String cluster, String brokerId) throws PulsarAdminException { - return sync(() -> getOwnedNamespacesAsync(cluster, brokerUrl)); + return sync(() -> getOwnedNamespacesAsync(cluster, brokerId)); } @Override public CompletableFuture> getOwnedNamespacesAsync( - String cluster, String brokerUrl) { - WebTarget path = adminBrokers.path(cluster).path(brokerUrl).path("ownedNamespaces"); + String cluster, String brokerId) { + WebTarget path = adminBrokers.path(cluster).path(brokerId).path("ownedNamespaces"); return asyncGetRequest(path, new FutureCallback>(){}); } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java index 009fa67fbaa29..a9d913c016490 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.admin.internal; +import static com.google.common.base.Preconditions.checkArgument; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -200,18 +201,21 @@ public PulsarAdminBuilder tlsProtocols(Set tlsProtocols) { @Override public PulsarAdminBuilder connectionTimeout(int connectionTimeout, TimeUnit connectionTimeoutUnit) { + checkArgument(connectionTimeout >= 0); this.conf.setConnectionTimeoutMs((int) connectionTimeoutUnit.toMillis(connectionTimeout)); return this; } @Override public PulsarAdminBuilder readTimeout(int readTimeout, TimeUnit readTimeoutUnit) { + checkArgument(readTimeout >= 0); this.conf.setReadTimeoutMs((int) readTimeoutUnit.toMillis(readTimeout)); return this; } @Override public PulsarAdminBuilder requestTimeout(int requestTimeout, TimeUnit requestTimeoutUnit) { + checkArgument(requestTimeout >= 0); this.conf.setRequestTimeoutMs((int) requestTimeoutUnit.toMillis(requestTimeout)); return this; } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java 
b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java index 33d1cd1785827..2d9754a0748df 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java @@ -964,21 +964,7 @@ public CompletableFuture truncateAsync(String topic) { @Override public CompletableFuture> getMessageByIdAsync(String topic, long ledgerId, long entryId) { - CompletableFuture> future = new CompletableFuture<>(); - getRemoteMessageById(topic, ledgerId, entryId).handle((r, ex) -> { - if (ex != null) { - if (ex instanceof NotFoundException) { - log.warn("Exception '{}' occurred while trying to get message.", ex.getMessage()); - future.complete(r); - } else { - future.completeExceptionally(ex); - } - return null; - } - future.complete(r); - return null; - }); - return future; + return getRemoteMessageById(topic, ledgerId, entryId); } private CompletableFuture> getRemoteMessageById(String topic, long ledgerId, long entryId) { diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java index 5693ebc8f60aa..835fecfc8e195 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java @@ -164,6 +164,7 @@ public TransactionPendingAckStats getPendingAckStats(String topic, String subNam @Override public CompletableFuture> getSlowTransactionsByCoordinatorIdAsync( Integer coordinatorId, long timeout, TimeUnit timeUnit) { + checkArgument(timeout >= 0); WebTarget path = adminV3Transactions.path("slowTransactions"); path = path.path(timeUnit.toMillis(timeout) + ""); if (coordinatorId != null) { diff --git a/pulsar-client-all/pom.xml b/pulsar-client-all/pom.xml index 73621954e1fb8..8bd4d350031dc 100644 --- a/pulsar-client-all/pom.xml +++ b/pulsar-client-all/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-all Pulsar Client All - ${project.groupId} @@ -70,7 +67,6 @@ test - @@ -108,7 +104,6 @@ - maven-antrun-plugin @@ -121,15 +116,12 @@ - + - org.apache.maven.plugins @@ -146,11 +138,10 @@ true true false - - org.apache.pulsar:pulsar-client-original - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-original + io.streamnative:pulsar-client-admin-original org.apache.commons:commons-lang3 commons-codec:commons-codec commons-collections:commons-collections @@ -176,12 +167,12 @@ commons-*:* io.swagger:* io.airlift:* - - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common org.apache.bookkeeper:* com.yahoo.datasketches:sketches-core org.glassfish.jersey*:* javax.ws.rs:* + javax.xml.bind:jaxb-api jakarta.annotation:* org.glassfish.hk2*:* io.grpc:* @@ -204,7 +195,7 @@ org.apache.commons:commons-compress org.tukaani:xz - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -212,7 +203,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -285,6 +276,10 @@ javax.annotation org.apache.pulsar.shade.javax.annotation + + javax.xml.bind + org.apache.pulsar.shade.javax.xml.bind + jersey org.apache.pulsar.shade.jersey @@ -310,24 +305,24 @@ org.apache.pulsar.shade.org.glassfish - io.grpc - org.apache.pulsar.shade.io.grpc + io.grpc + org.apache.pulsar.shade.io.grpc - okio - org.apache.pulsar.shade.okio + okio + org.apache.pulsar.shade.okio - com.squareup - org.apache.pulsar.shade.com.squareup + com.squareup + org.apache.pulsar.shade.com.squareup - io.opencensus - org.apache.pulsar.shade.io.opencensus + io.opencensus + org.apache.pulsar.shade.io.opencensus - org.eclipse.jetty - org.apache.pulsar.shade.org.eclipse.jetty + org.eclipse.jetty + org.apache.pulsar.shade.org.eclipse.jetty org.objenesis @@ -384,14 +379,13 @@ - - + + - - - 4.0.0 - - - org.apache.pulsar - pulsar - 3.0.0-SNAPSHOT - .. 
- - - pulsar-client-api - Pulsar Client :: API - - - - - - com.google.protobuf - protobuf-java - provided - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - + + com.google.protobuf + protobuf-java + provided + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java index 14a94cb8286dc..01c9fc4a86477 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java @@ -126,7 +126,7 @@ public interface ConsumerBuilder extends Cloneable { ConsumerBuilder topics(List topicNames); /** - * Specify a pattern for topics that this consumer subscribes to. + * Specify a pattern for topics(not contains the partition suffix) that this consumer subscribes to. * *

The pattern is applied to subscribe to all topics, within a single namespace, that match the * pattern. @@ -134,13 +134,13 @@ public interface ConsumerBuilder extends Cloneable { *

The consumer automatically subscribes to topics created after itself. * * @param topicsPattern - * a regular expression to select a list of topics to subscribe to + * a regular expression to select a list of topics (excluding the partition suffix) to subscribe to * @return the consumer builder instance */ ConsumerBuilder topicsPattern(Pattern topicsPattern); /** - * Specify a pattern for topics that this consumer subscribes to. + * Specify a pattern for topics (excluding the partition suffix) that this consumer subscribes to. * *

It accepts a regular expression that is compiled into a pattern internally. E.g., * "persistent://public/default/pattern-topic-.*" @@ -151,7 +151,7 @@ public interface ConsumerBuilder extends Cloneable { *

The consumer automatically subscribes to topics created after itself. * * @param topicsPattern - * given regular expression for topics pattern + * given regular expression for topics (excluding the partition suffix) pattern * @return the consumer builder instance */ ConsumerBuilder topicsPattern(String topicsPattern); diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java index 9409eefe2e0f0..22a97571e532e 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java @@ -344,6 +344,22 @@ public TopicDoesNotExistException(String msg) { } } + /** + * Exception thrown when the subscription is not found and cannot be created. + */ + public static class SubscriptionNotFoundException extends PulsarClientException { + /** + * Constructs an {@code SubscriptionNotFoundException} with the specified detail message. + * + * @param msg + * The detail message (which is saved for later retrieval + * by the {@link #getMessage()} method) + */ + public SubscriptionNotFoundException(String msg) { + super(msg); + } + } + /** * Lookup exception thrown by Pulsar client. */ @@ -1159,6 +1175,7 @@ public static boolean isRetriableError(Throwable t) { || t instanceof NotFoundException || t instanceof IncompatibleSchemaException || t instanceof TopicDoesNotExistException + || t instanceof SubscriptionNotFoundException || t instanceof UnsupportedAuthenticationException || t instanceof InvalidMessageException || t instanceof InvalidTopicNameException diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/TopicMessageId.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/TopicMessageId.java index b70267bb0fb8b..4d02a7f4096d6 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/TopicMessageId.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/TopicMessageId.java @@ -18,7 +18,7 @@ */ package org.apache.pulsar.client.api; -import java.util.BitSet; +import org.apache.pulsar.client.internal.DefaultImplementation; /** * The MessageId used for a consumer that subscribes multiple topics or partitioned topics. @@ -45,84 +45,6 @@ static TopicMessageId create(String topic, MessageId messageId) { if (messageId instanceof TopicMessageId) { return (TopicMessageId) messageId; } - return new Impl(topic, messageId); - } - - /** - * The simplest implementation of a TopicMessageId interface. 
- */ - class Impl implements MessageIdAdv, TopicMessageId { - private final String topic; - private final MessageIdAdv messageId; - - public Impl(String topic, MessageId messageId) { - this.topic = topic; - this.messageId = (MessageIdAdv) messageId; - } - - @Override - public byte[] toByteArray() { - return messageId.toByteArray(); - } - - @Override - public String getOwnerTopic() { - return topic; - } - - @Override - public long getLedgerId() { - return messageId.getLedgerId(); - } - - @Override - public long getEntryId() { - return messageId.getEntryId(); - } - - @Override - public int getPartitionIndex() { - return messageId.getPartitionIndex(); - } - - @Override - public int getBatchIndex() { - return messageId.getBatchIndex(); - } - - @Override - public int getBatchSize() { - return messageId.getBatchSize(); - } - - @Override - public BitSet getAckSet() { - return messageId.getAckSet(); - } - - @Override - public MessageIdAdv getFirstChunkMessageId() { - return messageId.getFirstChunkMessageId(); - } - - @Override - public int compareTo(MessageId o) { - return messageId.compareTo(o); - } - - @Override - public boolean equals(Object obj) { - return messageId.equals(obj); - } - - @Override - public int hashCode() { - return messageId.hashCode(); - } - - @Override - public String toString() { - return messageId.toString(); - } + return DefaultImplementation.getDefaultImplementation().newTopicMessageId(topic, messageId); } } diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java index 875a793023523..8fd05bff265f1 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java @@ -37,6 +37,7 @@ import org.apache.pulsar.client.api.MessagePayloadFactory; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.TopicMessageId; import org.apache.pulsar.client.api.schema.GenericRecord; import org.apache.pulsar.client.api.schema.GenericSchema; import org.apache.pulsar.client.api.schema.RecordSchemaBuilder; @@ -252,4 +253,6 @@ static byte[] getBytes(ByteBuffer byteBuffer) { SchemaInfo newSchemaInfoImpl(String name, byte[] schema, SchemaType type, long timestamp, Map propertiesValue); + + TopicMessageId newTopicMessageId(String topic, MessageId messageId); } diff --git a/pulsar-client-api/src/main/resources/findbugsExclude.xml b/pulsar-client-api/src/main/resources/findbugsExclude.xml index 9d73ac29a7bdb..353f01a7bb285 100644 --- a/pulsar-client-api/src/main/resources/findbugsExclude.xml +++ b/pulsar-client-api/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-client-auth-athenz/pom.xml b/pulsar-client-auth-athenz/pom.xml index 3cdbdb48a2935..aba76d9cc261f 100644 --- a/pulsar-client-auth-athenz/pom.xml +++ b/pulsar-client-auth-athenz/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-auth-athenz jar Athenz authentication plugin for java client - - ${project.groupId} pulsar-client-original ${project.parent.version} true - com.yahoo.athenz athenz-zts-java-client-core - com.yahoo.athenz athenz-cert-refresher - + + org.bouncycastle + bcpkix-jdk18on + com.google.guava guava - org.apache.commons commons-lang3 - @@ -72,7 +67,6 @@ ${pulsar.client.compiler.release} - org.apache.maven.plugins maven-enforcer-plugin @@ -103,7 +97,6 @@ - org.gaul modernizer-maven-plugin @@ -121,7 +114,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -139,7 +131,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml b/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml +++ b/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client-auth-sasl jar SASL authentication plugin for java client - - ${project.groupId} pulsar-client-original ${project.parent.version} true - com.google.guava guava - org.apache.commons commons-lang3 - org.projectlombok lombok - javax.ws.rs javax.ws.rs-api - org.glassfish.jersey.core jersey-client - - @@ -78,7 +67,6 @@ ${pulsar.client.compiler.release} - org.apache.maven.plugins maven-enforcer-plugin @@ -109,7 +97,6 @@ - org.gaul modernizer-maven-plugin @@ -127,7 +114,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-messagecrypto-bc/pom.xml b/pulsar-client-messagecrypto-bc/pom.xml index 6292d99003dd6..341e466d8c611 100644 --- a/pulsar-client-messagecrypto-bc/pom.xml +++ b/pulsar-client-messagecrypto-bc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-messagecrypto-bc jar Message crypto for End to End encryption with BouncyCastleProvider - ${project.groupId} @@ -40,7 +37,6 @@ ${project.parent.version} provided - ${project.groupId} bouncy-castle-bc @@ -48,9 +44,7 @@ pkg true - - diff --git a/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java b/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java index 2d7b779fa7b6c..146f066ae2c6b 100644 --- a/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java +++ b/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java @@ -35,6 +35,7 @@ import java.security.PublicKey; import java.security.SecureRandom; import java.security.Security; +import java.security.spec.AlgorithmParameterSpec; import java.security.spec.InvalidKeySpecException; import java.util.HashMap; import java.util.List; @@ -73,6 +74,7 @@ import org.bouncycastle.jce.spec.ECParameterSpec; import org.bouncycastle.jce.spec.ECPrivateKeySpec; import org.bouncycastle.jce.spec.ECPublicKeySpec; +import org.bouncycastle.jce.spec.IESParameterSpec; import org.bouncycastle.openssl.PEMException; import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; @@ -172,6 +174,7 @@ public SecretKey load(ByteBuffer key) { dataKey = keyGenerator.generateKey(); iv = new byte[IV_LEN]; + } private PublicKey loadPublicKey(byte[] keyBytes) throws Exception { @@ -322,22 +325,27 @@ private void addPublicKeyCipher(String keyName, CryptoKeyReader keyReader) throw byte[] encryptedKey; try { - + AlgorithmParameterSpec params = null; // Encrypt data key using public key if (RSA.equals(pubKey.getAlgorithm())) { dataKeyCipher = Cipher.getInstance(RSA_TRANS, BouncyCastleProvider.PROVIDER_NAME); } else if (ECDSA.equals(pubKey.getAlgorithm())) { dataKeyCipher = Cipher.getInstance(ECIES, BouncyCastleProvider.PROVIDER_NAME); + params = createIESParameterSpec(); } else { String msg = logCtx + "Unsupported key type " + pubKey.getAlgorithm() + " for key " + keyName; log.error(msg); throw new PulsarClientException.CryptoException(msg); } - dataKeyCipher.init(Cipher.ENCRYPT_MODE, pubKey); + if (params != null) { + dataKeyCipher.init(Cipher.ENCRYPT_MODE, pubKey, params); + } else { + dataKeyCipher.init(Cipher.ENCRYPT_MODE, pubKey); + } encryptedKey = dataKeyCipher.doFinal(dataKey.getEncoded()); } catch (IllegalBlockSizeException | BadPaddingException | NoSuchAlgorithmException | NoSuchProviderException - | NoSuchPaddingException | InvalidKeyException e) { + | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException e) { log.error("{} Failed to encrypt data key {}. {}", logCtx, keyName, e.getMessage()); throw new PulsarClientException.CryptoException(e.getMessage()); } @@ -345,6 +353,13 @@ private void addPublicKeyCipher(String keyName, CryptoKeyReader keyReader) throw encryptedDataKeyMap.put(keyName, eki); } + // required since Bouncycastle 1.72 when using ECIES, it is required to pass in an IESParameterSpec + private IESParameterSpec createIESParameterSpec() { + // the IESParameterSpec to use was discovered by debugging BouncyCastle 1.69 and running the + // test org.apache.pulsar.client.api.SimpleProducerConsumerTest#testCryptoWithChunking + return new IESParameterSpec(null, null, 128); + } + /* * Remove a key

Remove the key identified by the keyName from the list of keys.

* @@ -474,23 +489,28 @@ private boolean decryptDataKey(String keyName, byte[] encryptedDataKey, List - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client Pulsar Client Java - ${project.groupId} @@ -49,11 +46,8 @@ ${project.version} - - - org.apache.maven.plugins maven-dependency-plugin @@ -89,7 +83,6 @@ - maven-antrun-plugin @@ -102,15 +95,12 @@ - + - org.apache.maven.plugins @@ -125,10 +115,9 @@ true true false - - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original org.apache.bookkeeper:* org.apache.commons:commons-lang3 commons-codec:commons-codec @@ -154,19 +143,17 @@ commons-*:* io.swagger:* io.airlift:* - - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common com.yahoo.datasketches:sketches-core org.objenesis:* org.yaml:snakeyaml - org.apache.avro:* com.thoughtworks.paranamer:paranamer org.apache.commons:commons-compress org.tukaani:xz - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -174,7 +161,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -302,14 +289,13 @@ - - + + - com.github.spotbugs spotbugs-maven-plugin @@ -323,7 +309,6 @@ - - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client-tools-api Pulsar Client Tools API Pulsar Client Tools API - ${project.groupId} @@ -66,7 +64,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -82,5 +79,4 @@ - diff --git a/pulsar-client-tools-customcommand-example/pom.xml b/pulsar-client-tools-customcommand-example/pom.xml index f1f9180e2d550..407b5436fdc73 100644 --- a/pulsar-client-tools-customcommand-example/pom.xml +++ b/pulsar-client-tools-customcommand-example/pom.xml @@ -1,3 +1,4 @@ + - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. @@ -31,7 +32,7 @@ Pulsar CLI Custom command example - org.apache.pulsar + io.streamnative pulsar-client-tools-api ${project.version} provided @@ -64,13 +65,6 @@ true - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - diff --git a/pulsar-client-tools-test/pom.xml b/pulsar-client-tools-test/pom.xml index 9edb3c1129ed5..90dcb5da39002 100644 --- a/pulsar-client-tools-test/pom.xml +++ b/pulsar-client-tools-test/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-client-tools-test Pulsar Client Tools Test Pulsar Client Tools Test - ${project.groupId} @@ -89,7 +87,6 @@ - org.apache.maven.plugins maven-deploy-plugin @@ -126,8 +123,8 @@ copy filters - - + + diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java index ccae1b1176527..18ccddd40d6e9 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java @@ -2355,6 +2355,11 @@ void schemas() throws Exception { PostSchemaPayload input = new ObjectMapper().readValue(new File(schemaFile), PostSchemaPayload.class); verify(schemas).createSchema("persistent://tn1/ns1/tp1", input); + cmdSchemas = new CmdSchemas(() -> admin); + cmdSchemas.run(split("compatibility -f " + schemaFile + " persistent://tn1/ns1/tp1")); + input = new ObjectMapper().readValue(new File(schemaFile), PostSchemaPayload.class); + verify(schemas).testCompatibility("persistent://tn1/ns1/tp1", input); + cmdSchemas = new CmdSchemas(() -> admin); String jarFile = PulsarAdminToolTest.class.getClassLoader() .getResource("dummyexamples.jar").getFile(); diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java index 8d416125fd1b3..4ea70bd9af929 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java @@ -111,6 +111,7 @@ public void testNonDurableSubscribe() throws Exception { properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("non-durable"); + admin.topics().createNonPartitionedTopic(topicName); int numberOfMessages = 10; @Cleanup("shutdownNow") @@ -202,6 +203,7 @@ public void testRead() throws Exception { properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("reader"); + admin.topics().createNonPartitionedTopic(topicName); int numberOfMessages = 10; @Cleanup("shutdownNow") @@ -251,6 +253,7 @@ public void testEncryption() throws Exception { properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("encryption"); + admin.topics().createNonPartitionedTopic(topicName); final String keyUriBase = "file:../pulsar-broker/src/test/resources/certificate/"; final int numberOfMessages = 10; @@ -328,9 +331,8 @@ public void testArgs() throws Exception { PulsarClientTool pulsarClientTool = new PulsarClientTool(new Properties()); final String url = "pulsar+ssl://localhost:6651"; final String authPlugin = "org.apache.pulsar.client.impl.auth.AuthenticationTls"; - final String authParams = "tlsCertFile:pulsar-broker/src/test/resources/authentication/tls/client-cert.pem," + - "tlsKeyFile:pulsar-broker/src/test/resources/authentication/tls/client-key.pem"; - final String tlsTrustCertsFilePath = "pulsar/pulsar-broker/src/test/resources/authentication/tls/cacert.pem"; + final String authParams = String.format("tlsCertFile:%s,tlsKeyFile:%s", getTlsFileForClient("admin.cert"), + getTlsFileForClient("admin.key-pk8")); final String message = "test msg"; final int numberOfMessages = 1; final String topicName = getTopicWithRandomSuffix("test-topic"); @@ -338,11 +340,11 @@ public 
void testArgs() throws Exception { String[] args = {"--url", url, "--auth-plugin", authPlugin, "--auth-params", authParams, - "--tlsTrustCertsFilePath", tlsTrustCertsFilePath, + "--tlsTrustCertsFilePath", CA_CERT_FILE_PATH, "produce", "-m", message, "-n", Integer.toString(numberOfMessages), topicName}; pulsarClientTool.jcommander.parse(args); - assertEquals(pulsarClientTool.rootParams.getTlsTrustCertsFilePath(), tlsTrustCertsFilePath); + assertEquals(pulsarClientTool.rootParams.getTlsTrustCertsFilePath(), CA_CERT_FILE_PATH); assertEquals(pulsarClientTool.rootParams.getAuthParams(), authParams); assertEquals(pulsarClientTool.rootParams.getAuthPluginClassName(), authPlugin); assertEquals(pulsarClientTool.rootParams.getServiceURL(), url); diff --git a/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml b/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml +++ b/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client-tools Pulsar Client Tools Pulsar Client Tools - com.beust @@ -108,7 +106,6 @@ io.swagger swagger-core - ${project.groupId} @@ -116,9 +113,8 @@ ${project.version} test - - org.apache.pulsar + io.streamnative pulsar-io-batch-discovery-triggerers ${project.version} test @@ -136,9 +132,7 @@ org.apache.commons commons-text - - @@ -158,7 +152,6 @@ - @@ -178,7 +171,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -194,5 +186,4 @@ - diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBase.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBase.java index ce2c44ec1e827..381bc8abcaa3f 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBase.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBase.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.admin.cli; +import static org.apache.pulsar.client.admin.internal.BaseResource.getApiException; import com.beust.jcommander.DefaultUsageFormatter; import com.beust.jcommander.IUsageFormatter; import com.beust.jcommander.JCommander; @@ -26,10 +27,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.function.Supplier; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.admin.PulsarAdminException.ConnectException; +import org.apache.pulsar.client.admin.internal.PulsarAdminImpl; public abstract class CmdBase { protected final JCommander jcommander; @@ -37,6 +43,12 @@ public abstract class CmdBase { private PulsarAdmin admin; private IUsageFormatter usageFormatter; + /** + * Default read timeout in milliseconds. 
+ * Used if not found from configuration data in {@link #getReadTimeoutMs()} + */ + private static final long DEFAULT_READ_TIMEOUT_MILLIS = 60000; + @Parameter(names = { "--help", "-h" }, help = true, hidden = true) private boolean help = false; @@ -124,6 +136,28 @@ protected PulsarAdmin getAdmin() { return admin; } + protected long getReadTimeoutMs() { + PulsarAdmin pulsarAdmin = getAdmin(); + if (pulsarAdmin instanceof PulsarAdminImpl) { + return ((PulsarAdminImpl) pulsarAdmin).getClientConfigData().getReadTimeoutMs(); + } + return DEFAULT_READ_TIMEOUT_MILLIS; + } + + protected T sync(Supplier> executor) throws PulsarAdminException { + try { + return executor.get().get(getReadTimeoutMs(), TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new PulsarAdminException.TimeoutException(e); + } catch (ExecutionException e) { + throw PulsarAdminException.wrap(getApiException(e.getCause())); + } catch (Exception e) { + throw PulsarAdminException.wrap(getApiException(e)); + } + } static Map parseListKeyValueMap(List metadata) { Map map = null; diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBrokers.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBrokers.java index 1e86edcf59c60..f1571e96c65c9 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBrokers.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBrokers.java @@ -150,8 +150,8 @@ private class ShutDownBrokerGracefully extends CliCommand { @Override void run() throws Exception { - getAdmin().brokers().shutDownBrokerGracefully(maxConcurrentUnloadPerSec, forcedTerminateTopic); - System.out.println("Successfully trigger broker shutdown gracefully"); + sync(() -> getAdmin().brokers().shutDownBrokerGracefully(maxConcurrentUnloadPerSec, forcedTerminateTopic)); + System.out.println("Successfully shutdown broker gracefully"); } } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java index 05bab9c6f198b..d951abe0fb2a8 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java @@ -1030,6 +1030,10 @@ void runCmd() throws Exception { updateOptions.setUpdateAuthData(updateAuthData); if (Utils.isFunctionPackageUrlSupported(functionConfig.getJar())) { getAdmin().functions().updateFunctionWithUrl(functionConfig, functionConfig.getJar(), updateOptions); + } else if (Utils.isFunctionPackageUrlSupported(functionConfig.getPy())) { + getAdmin().functions().updateFunctionWithUrl(functionConfig, functionConfig.getPy(), updateOptions); + } else if (Utils.isFunctionPackageUrlSupported(functionConfig.getGo())) { + getAdmin().functions().updateFunctionWithUrl(functionConfig, functionConfig.getGo(), updateOptions); } else { getAdmin().functions().updateFunction(functionConfig, userCodeFile, updateOptions); } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java index 998591f8177d1..9c32041a36503 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java @@ -1962,7 
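// [Illustrative sketch, not part of this patch] The sync(...) helper introduced in CmdBase lets a
// CLI command block on an asynchronous admin call but fail with a timeout instead of hanging
// forever. A minimal standalone version of the same pattern, using only JDK types; the class and
// exception handling here are hypothetical stand-ins, not the Pulsar classes:
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

final class SyncHelperSketch {
    private static final long DEFAULT_READ_TIMEOUT_MILLIS = 60_000;

    static <T> T sync(Supplier<CompletableFuture<T>> executor) throws Exception {
        try {
            return executor.get().get(DEFAULT_READ_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();   // preserve the interrupt flag before rethrowing
            throw e;
        } catch (TimeoutException e) {
            throw new Exception("admin call timed out", e);
        } catch (ExecutionException e) {
            throw new Exception("admin call failed", e.getCause());
        }
    }

    public static void main(String[] args) throws Exception {
        // A trivial async operation stands in for an async PulsarAdmin call.
        String result = sync(() -> CompletableFuture.supplyAsync(() -> "done"));
        System.out.println(result);
    }
}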
+1962,8 @@ private class GetOffloadThreshold extends CliCommand { @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); - print(getAdmin().namespaces().getOffloadThreshold(namespace)); + print("offloadThresholdInBytes: " + getAdmin().namespaces().getOffloadThreshold(namespace)); + print("offloadThresholdInSeconds: " + getAdmin().namespaces().getOffloadThresholdInSeconds(namespace)); } } @@ -1980,11 +1981,18 @@ private class SetOffloadThreshold extends CliCommand { required = true) private String thresholdStr = "-1"; + @Parameter(names = {"--time", "-t"}, + description = "Maximum number of seconds stored on the pulsar cluster for a topic" + + " before the broker will start offloading to longterm storage (eg: 10m, 5h, 3d, 2w).") + private String thresholdInSeconds = "-1"; + @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); long threshold = validateSizeString(thresholdStr); + long timeInSeconds = RelativeTimeUtil.parseRelativeTimeInSeconds(thresholdInSeconds); getAdmin().namespaces().setOffloadThreshold(namespace, threshold); + getAdmin().namespaces().setOffloadThresholdInSeconds(namespace, timeInSeconds); } } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java index 5383e39807b05..44ac143e3507e 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java @@ -42,6 +42,7 @@ public CmdSchemas(Supplier admin) { jcommander.addCommand("delete", new DeleteSchema()); jcommander.addCommand("upload", new UploadSchema()); jcommander.addCommand("extract", new ExtractSchema()); + jcommander.addCommand("compatibility", new TestCompatibility()); } @Parameters(commandDescription = "Get the schema for a topic") @@ -164,4 +165,20 @@ void run() throws Exception { } } + @Parameters(commandDescription = "Test schema compatibility") + private class TestCompatibility extends CliCommand { + @Parameter(description = "persistent://tenant/namespace/topic", required = true) + private java.util.List params; + + @Parameter(names = { "-f", "--filename" }, description = "filename", required = true) + private String schemaFileName; + + @Override + void run() throws Exception { + String topic = validateTopicName(params); + PostSchemaPayload input = MAPPER.readValue(new File(schemaFileName), PostSchemaPayload.class); + getAdmin().schemas().testCompatibility(topic, input); + } + } + } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java index ca0a8a055cfc9..6844504223989 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java @@ -312,7 +312,7 @@ private static void exit(int code) { if (allowSystemExit) { // we are using halt and not System.exit, we do not mind about shutdown hooks // they are only slowing down the tool - ShutdownUtil.triggerImmediateForcefulShutdown(code); + ShutdownUtil.triggerImmediateForcefulShutdown(code, false); } else { System.out.println("Exit code is " + code + " (System.exit not called, as we are in test mode)"); } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdConsume.java 
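// [Illustrative sketch, not part of this patch] The new "schemas compatibility" subcommand added
// in CmdSchemas can also be exercised programmatically through the admin client. The import path
// for PostSchemaPayload, the service URL, and the local file name are assumptions, not taken from
// this diff; only the testCompatibility(topic, payload) call itself appears in the patch.
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.protocol.schema.PostSchemaPayload;

public class SchemaCompatibilityCheckSketch {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080")   // assumed local broker admin endpoint
                .build()) {
            PostSchemaPayload payload =
                    new ObjectMapper().readValue(new File("schema.json"), PostSchemaPayload.class);
            // Same call the CLI issues for: pulsar-admin schemas compatibility -f schema.json <topic>
            admin.schemas().testCompatibility("persistent://public/default/my-topic", payload);
        }
    }
}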
b/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdConsume.java index 58ab6360a17bb..0c65604cbe6b8 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdConsume.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdConsume.java @@ -109,6 +109,9 @@ public class CmdConsume extends AbstractCmdConsume { @Parameter(names = { "-pm", "--pool-messages" }, description = "Use the pooled message", arity = 1) private boolean poolMessages = true; + @Parameter(names = {"-rs", "--replicated" }, description = "Whether the subscription status should be replicated") + private boolean replicateSubscriptionState = false; + public CmdConsume() { // Do nothing super(); @@ -156,7 +159,8 @@ private int consume(String topic) { .subscriptionType(subscriptionType) .subscriptionMode(subscriptionMode) .subscriptionInitialPosition(subscriptionInitialPosition) - .poolMessages(poolMessages); + .poolMessages(poolMessages) + .replicateSubscriptionState(replicateSubscriptionState); if (isRegex) { builder.topicsPattern(Pattern.compile(topic)); diff --git a/pulsar-client/pom.xml b/pulsar-client/pom.xml index 9d0dd08eecf10..72bbe1d389218 100644 --- a/pulsar-client/pom.xml +++ b/pulsar-client/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. - pulsar-client-original Pulsar Client Java - ${project.groupId} pulsar-client-api ${project.parent.version} - ${project.groupId} pulsar-common ${project.parent.version} - ${project.groupId} bouncy-castle-bc ${project.parent.version} pkg - ${project.groupId} @@ -59,7 +53,6 @@ ${project.parent.version} true - io.netty netty-codec-http @@ -72,7 +65,6 @@ io.netty netty-codec-socks - io.netty netty-resolver-dns @@ -87,50 +79,40 @@ netty-resolver-dns-native-macos osx-x86_64 - org.apache.commons commons-lang3 - org.asynchttpclient async-http-client - com.typesafe.netty netty-reactive-streams - org.slf4j slf4j-api - commons-codec commons-codec - com.yahoo.datasketches sketches-core - com.google.code.gson gson - - org.apache.avro avro ${avro.version} - org.apache.avro avro-protobuf @@ -142,36 +124,30 @@ - joda-time joda-time provided - com.google.protobuf protobuf-java provided - com.fasterxml.jackson.module jackson-module-jsonSchema - net.jcip jcip-annotations - com.github.spotbugs spotbugs-annotations provided true - ${project.groupId} @@ -179,22 +155,18 @@ ${project.parent.version} test - org.skyscreamer jsonassert ${skyscreamer.version} test - org.awaitility awaitility test - - @@ -228,7 +200,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -242,7 +213,6 @@ - org.xolstice.maven.plugins protobuf-maven-plugin diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java index 1827142cdfa2b..8c17d8fcb253c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java @@ -35,7 +35,6 @@ public abstract class AbstractBatchMessageContainer implements BatchMessageConta protected CompressionType compressionType; protected CompressionCodec compressor; protected String topicName; - protected String producerName; protected ProducerImpl producer; protected int maxNumMessagesInBatch; @@ -54,6 +53,7 @@ public abstract class AbstractBatchMessageContainer implements BatchMessageConta // allocate a new buffer 
that can hold the entire batch without needing costly reallocations protected int maxBatchSize = INITIAL_BATCH_BUFFER_SIZE; protected int maxMessagesNum = INITIAL_MESSAGES_NUM; + private volatile long firstAddedTimestamp = 0L; @Override public boolean haveEnoughSpace(MessageImpl msg) { @@ -108,7 +108,6 @@ public ProducerImpl.OpSendMsg createOpSendMsg() throws IOException { public void setProducer(ProducerImpl producer) { this.producer = producer; this.topicName = producer.getTopic(); - this.producerName = producer.getProducerName(); this.compressionType = CompressionCodecProvider .convertToWireProtocol(producer.getConfiguration().getCompressionType()); this.compressor = CompressionCodecProvider.getCompressionCodec(compressionType); @@ -129,4 +128,19 @@ public boolean hasSameTxn(MessageImpl msg) { return currentTxnidMostBits == msg.getMessageBuilder().getTxnidMostBits() && currentTxnidLeastBits == msg.getMessageBuilder().getTxnidLeastBits(); } + + @Override + public long getFirstAddedTimestamp() { + return firstAddedTimestamp; + } + + protected void tryUpdateTimestamp() { + if (numMessagesInBatch == 1) { + firstAddedTimestamp = System.nanoTime(); + } + } + + protected void clearTimestamp() { + firstAddedTimestamp = 0L; + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java index 68b781e67d29c..a1017e66760a5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java @@ -329,6 +329,7 @@ public AutoClusterFailoverBuilder switchBackDelay(long switchBackDelay, TimeUnit @Override public AutoClusterFailoverBuilder checkInterval(long interval, TimeUnit timeUnit) { + checkArgument(interval >= 0L, "check interval time must not be negative."); this.checkIntervalMs = timeUnit.toMillis(interval); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java index 8fb4e9f2ce543..ddbe1bc255779 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java @@ -82,4 +82,11 @@ public interface BatchMessageContainerBase extends BatchMessageContainer { * @return belong to the same txn or not */ boolean hasSameTxn(MessageImpl msg); + + /** + * Get the timestamp in nanoseconds when the 1st message is added into the batch container. 
+ * + * @return the timestamp in nanoseconds or 0L if the batch container is empty + */ + long getFirstAddedTimestamp(); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java index fdbf1f15c296a..bf8c1f9de8201 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java @@ -89,8 +89,8 @@ public BatchMessageContainerImpl(ProducerImpl producer) { public boolean add(MessageImpl msg, SendCallback callback) { if (log.isDebugEnabled()) { - log.debug("[{}] [{}] add message to batch, num messages in batch so far {}", topicName, producerName, - numMessagesInBatch); + log.debug("[{}] [{}] add message to batch, num messages in batch so far {}", topicName, + producer.getProducerName(), numMessagesInBatch); } if (++numMessagesInBatch == 1) { @@ -127,6 +127,7 @@ public boolean add(MessageImpl msg, SendCallback callback) { previousCallback = callback; currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); messages.add(msg); + tryUpdateTimestamp(); if (lowestSequenceId == -1L) { lowestSequenceId = msg.getSequenceId(); @@ -203,6 +204,7 @@ void updateMaxBatchSize(int uncompressedSize) { @Override public void clear() { + clearTimestamp(); messages = new ArrayList<>(maxMessagesNum); firstCallback = null; previousCallback = null; @@ -234,8 +236,8 @@ public void discard(Exception ex) { batchedMessageMetadataAndPayload = null; } } catch (Throwable t) { - log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topicName, producerName, - lowestSequenceId, t); + log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topicName, + producer.getProducerName(), lowestSequenceId, t); } clear(); } @@ -304,6 +306,14 @@ public OpSendMsg createOpSendMsg() throws IOException { ByteBufPair cmd = producer.sendMessage(producer.producerId, messageMetadata.getSequenceId(), messageMetadata.getHighestSequenceId(), numMessagesInBatch, messageMetadata, encryptedPayload); + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Build batch msg seq:{}, highest-seq:{}, numMessagesInBatch: {}, uncompressedSize: {}," + + " payloadSize: {}", topicName, producer.getProducerName(), + messageMetadata.getSequenceId(), messageMetadata.getNumMessagesInBatch(), + messageMetadata.getHighestSequenceId(), + messageMetadata.getUncompressedSize(), encryptedPayload.readableBytes()); + } + OpSendMsg op = OpSendMsg.create(messages, cmd, messageMetadata.getSequenceId(), messageMetadata.getHighestSequenceId(), firstCallback, batchAllocatedSizeBytes); @@ -316,9 +326,11 @@ public OpSendMsg createOpSendMsg() throws IOException { protected void updateAndReserveBatchAllocatedSize(int updatedSizeBytes) { int delta = updatedSizeBytes - batchAllocatedSizeBytes; batchAllocatedSizeBytes = updatedSizeBytes; - if (delta != 0) { - if (producer != null) { + if (producer != null) { + if (delta > 0) { producer.client.getMemoryLimitController().forceReserveMemory(delta); + } else if (delta < 0) { + producer.client.getMemoryLimitController().releaseMemory(-delta); } } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java index 45d683e72b0a3..1592d3cae6cb5 100644 --- 
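// [Illustrative sketch, not part of this patch] The change to updateAndReserveBatchAllocatedSize
// reserves memory only when the tracked allocation grows and releases it when it shrinks, so the
// client-wide memory counter stays in step with the real buffer size. A minimal version of the
// same bookkeeping, with a plain AtomicLong standing in for the MemoryLimitController:
import java.util.concurrent.atomic.AtomicLong;

final class BatchMemoryAccountingSketch {
    private final AtomicLong reservedBytes = new AtomicLong();   // stand-in for the limit controller
    private int batchAllocatedSizeBytes;

    void updateAndReserve(int updatedSizeBytes) {
        int delta = updatedSizeBytes - batchAllocatedSizeBytes;
        batchAllocatedSizeBytes = updatedSizeBytes;
        if (delta > 0) {
            reserve(delta);                      // buffer grew: reserve the extra bytes
        } else if (delta < 0) {
            release(-delta);                     // buffer shrank: give the difference back
        }
    }

    private void reserve(long bytes) { reservedBytes.addAndGet(bytes); }

    private void release(long bytes) { reservedBytes.addAndGet(-bytes); }

    long currentlyReserved() { return reservedBytes.get(); }
}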
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java @@ -43,8 +43,8 @@ class BatchMessageKeyBasedContainer extends AbstractBatchMessageContainer { @Override public boolean add(MessageImpl msg, SendCallback callback) { if (log.isDebugEnabled()) { - log.debug("[{}] [{}] add message to batch, num messages in batch so far is {}", topicName, producerName, - numMessagesInBatch); + log.debug("[{}] [{}] add message to batch, num messages in batch so far is {}", topicName, + producer.getProducerName(), numMessagesInBatch); } String key = getKey(msg); final BatchMessageContainerImpl batchMessageContainer = batches.computeIfAbsent(key, @@ -57,11 +57,13 @@ public boolean add(MessageImpl msg, SendCallback callback) { numMessagesInBatch++; currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); } + tryUpdateTimestamp(); return isBatchFull(); } @Override public void clear() { + clearTimestamp(); numMessagesInBatch = 0; currentBatchSizeBytes = 0; batches.clear(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java index d5ce9213211dd..8ceb8e44975c8 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java @@ -26,10 +26,12 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SchemaSerializationException; @@ -56,6 +58,12 @@ public class BinaryProtoLookupService implements LookupService { private final String listenerName; private final int maxLookupRedirects; + private final ConcurrentHashMap>> + lookupInProgress = new ConcurrentHashMap<>(); + + private final ConcurrentHashMap> + partitionedMetadataInProgress = new ConcurrentHashMap<>(); + public BinaryProtoLookupService(PulsarClientImpl client, String serviceUrl, boolean useTls, @@ -92,7 +100,21 @@ public void updateServiceUrl(String serviceUrl) throws PulsarClientException { * @return broker-socket-address that serves given topic */ public CompletableFuture> getBroker(TopicName topicName) { - return findBroker(serviceNameResolver.resolveHost(), false, topicName, 0); + final MutableObject newFutureCreated = new MutableObject<>(); + try { + return lookupInProgress.computeIfAbsent(topicName, tpName -> { + CompletableFuture> newFuture = + findBroker(serviceNameResolver.resolveHost(), false, topicName, 0); + newFutureCreated.setValue(newFuture); + return newFuture; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + lookupInProgress.remove(topicName, newFutureCreated.getValue()); + }); + } + } } /** @@ -100,7 +122,21 @@ public CompletableFuture> getBroker(T * */ public CompletableFuture getPartitionedTopicMetadata(TopicName topicName) { - return getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), 
topicName); + final MutableObject newFutureCreated = new MutableObject<>(); + try { + return partitionedMetadataInProgress.computeIfAbsent(topicName, tpName -> { + CompletableFuture newFuture = + getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), topicName); + newFutureCreated.setValue(newFuture); + return newFuture; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + partitionedMetadataInProgress.remove(topicName, newFutureCreated.getValue()); + }); + } + } } private CompletableFuture> findBroker(InetSocketAddress socketAddress, diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java index 7677045f0899b..107f15905044c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java @@ -166,6 +166,7 @@ public ClientBuilder operationTimeout(int operationTimeout, TimeUnit unit) { @Override public ClientBuilder lookupTimeout(int lookupTimeout, TimeUnit unit) { + checkArgument(lookupTimeout >= 0, "lookupTimeout must not be negative"); conf.setLookupTimeoutMs(unit.toMillis(lookupTimeout)); return this; } @@ -331,6 +332,7 @@ public ClientBuilder keepAliveInterval(int keepAliveInterval, TimeUnit unit) { @Override public ClientBuilder connectionTimeout(int duration, TimeUnit unit) { + checkArgument(duration >= 0, "connectionTimeout needs to be >= 0"); conf.setConnectionTimeoutMs((int) unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java index 115c71307c4f2..496a9ca04271a 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java @@ -30,6 +30,7 @@ import io.netty.channel.EventLoopGroup; import io.netty.channel.unix.Errors.NativeIoException; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.handler.ssl.SslHandshakeCompletionEvent; import io.netty.util.concurrent.Promise; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -1291,6 +1292,8 @@ public static PulsarClientException getPulsarClientException(ServerError error, return new PulsarClientException.IncompatibleSchemaException(errorMsg); case TopicNotFound: return new PulsarClientException.TopicDoesNotExistException(errorMsg); + case SubscriptionNotFound: + return new PulsarClientException.SubscriptionNotFoundException(errorMsg); case ConsumerAssignError: return new PulsarClientException.ConsumerAssignException(errorMsg); case NotAllowedError: @@ -1311,6 +1314,18 @@ public void close() { } } + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + if (evt instanceof SslHandshakeCompletionEvent) { + SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) evt; + if (sslHandshakeCompletionEvent.cause() != null) { + log.warn("{} Got ssl handshake exception {}", ctx.channel(), + sslHandshakeCompletionEvent); + } + } + ctx.fireUserEventTriggered(evt); + } + protected void closeWithException(Throwable e) { if (ctx != null) { connectionFuture.completeExceptionally(e); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java 
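// [Illustrative sketch, not part of this patch] The lookup changes above collapse concurrent
// lookups for the same topic into a single in-flight future: computeIfAbsent() hands every caller
// the same CompletableFuture, and only the caller that created it removes the map entry once the
// future completes. A standalone sketch of the same de-duplication pattern (names are hypothetical,
// and an AtomicReference replaces commons-lang's MutableObject):
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

final class InFlightRequestDeduplicator<K, V> {
    private final ConcurrentHashMap<K, CompletableFuture<V>> inProgress = new ConcurrentHashMap<>();

    CompletableFuture<V> get(K key, Function<K, CompletableFuture<V>> loader) {
        AtomicReference<CompletableFuture<V>> created = new AtomicReference<>();
        try {
            return inProgress.computeIfAbsent(key, k -> {
                CompletableFuture<V> f = loader.apply(k);
                created.set(f);                  // remember that this caller started the request
                return f;
            });
        } finally {
            CompletableFuture<V> f = created.get();
            if (f != null) {
                // Only the creator unregisters the future, and only once it has completed,
                // so late callers can still join the request while it is in flight.
                f.whenComplete((v, ex) -> inProgress.remove(key, f));
            }
        }
    }
}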
b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java index 046cb90643a23..fc7c89c3ce693 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java @@ -21,6 +21,7 @@ import java.net.InetSocketAddress; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import org.apache.pulsar.client.api.PulsarClientException; @@ -41,22 +42,36 @@ public class ConnectionHandler { // Start with -1L because it gets incremented before sending on the first connection private volatile long epoch = -1L; protected volatile long lastConnectionClosedTimestamp = 0L; + private final AtomicBoolean duringConnect = new AtomicBoolean(false); + protected final int randomKeyForSelectConnection; interface Connection { - void connectionFailed(PulsarClientException exception); - void connectionOpened(ClientCnx cnx); + + /** + * @apiNote If the returned future is completed exceptionally, reconnectLater will be called. + */ + CompletableFuture connectionOpened(ClientCnx cnx); + default void connectionFailed(PulsarClientException e) { + } } protected Connection connection; protected ConnectionHandler(HandlerState state, Backoff backoff, Connection connection) { this.state = state; + this.randomKeyForSelectConnection = state.client.getCnxPool().genRandomKeyToSelectCon(); this.connection = connection; this.backoff = backoff; CLIENT_CNX_UPDATER.set(this, null); } protected void grabCnx() { + if (!duringConnect.compareAndSet(false, true)) { + log.info("[{}] [{}] Skip grabbing the connection since there is a pending connection", + state.topic, state.getHandlerName()); + return; + } + if (CLIENT_CNX_UPDATER.get(this) != null) { log.warn("[{}] [{}] Client cnx already set, ignoring reconnection request", state.topic, state.getHandlerName()); @@ -75,13 +90,14 @@ protected void grabCnx() { if (state.redirectedClusterURI != null) { InetSocketAddress address = InetSocketAddress.createUnresolved(state.redirectedClusterURI.getHost(), state.redirectedClusterURI.getPort()); - cnxFuture = state.client.getConnection(address, address); + cnxFuture = state.client.getConnection(address, address, randomKeyForSelectConnection); } else if (state.topic == null) { cnxFuture = state.client.getConnectionToServiceUrl(); } else { - cnxFuture = state.client.getConnection(state.topic); // + cnxFuture = state.client.getConnection(state.topic, randomKeyForSelectConnection); } - cnxFuture.thenAccept(cnx -> connection.connectionOpened(cnx)) // + cnxFuture.thenCompose(cnx -> connection.connectionOpened(cnx)) + .thenAccept(__ -> duringConnect.set(false)) .exceptionally(this::handleConnectionError); } catch (Throwable t) { log.warn("[{}] [{}] Exception thrown while getting connection: ", state.topic, state.getHandlerName(), t); @@ -90,26 +106,28 @@ protected void grabCnx() { } private Void handleConnectionError(Throwable exception) { - log.warn("[{}] [{}] Error connecting to broker: {}", - state.topic, state.getHandlerName(), exception.getMessage()); - if (exception instanceof PulsarClientException) { - connection.connectionFailed((PulsarClientException) exception); - } else if (exception.getCause() instanceof PulsarClientException) { - connection.connectionFailed((PulsarClientException) exception.getCause()); - } 
else { - connection.connectionFailed(new PulsarClientException(exception)); - } - - State state = this.state.getState(); - if (state == State.Uninitialized || state == State.Connecting || state == State.Ready) { - reconnectLater(exception); + try { + log.warn("[{}] [{}] Error connecting to broker: {}", + state.topic, state.getHandlerName(), exception.getMessage()); + if (exception instanceof PulsarClientException) { + connection.connectionFailed((PulsarClientException) exception); + } else if (exception.getCause() instanceof PulsarClientException) { + connection.connectionFailed((PulsarClientException) exception.getCause()); + } else { + connection.connectionFailed(new PulsarClientException(exception)); + } + } catch (Throwable throwable) { + log.error("[{}] [{}] Unexpected exception after the connection", + state.topic, state.getHandlerName(), throwable); } + reconnectLater(exception); return null; } - protected void reconnectLater(Throwable exception) { + void reconnectLater(Throwable exception) { CLIENT_CNX_UPDATER.set(this, null); + duringConnect.set(false); if (!isValidStateForReconnection()) { log.info("[{}] [{}] Ignoring reconnection request (state: {})", state.topic, state.getHandlerName(), state.getState()); @@ -132,6 +150,7 @@ protected void reconnectLater(Throwable exception) { public void connectionClosed(ClientCnx cnx) { lastConnectionClosedTimestamp = System.currentTimeMillis(); + duringConnect.set(false); state.client.getCnxPool().releaseConnection(cnx); if (CLIENT_CNX_UPDATER.compareAndSet(this, cnx, null)) { if (!isValidStateForReconnection()) { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java index 1420d81c688ee..ef3a9249c820a 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java @@ -165,8 +165,18 @@ private static AddressResolver createAddressResolver(ClientCo private static final Random random = new Random(); + public int genRandomKeyToSelectCon() { + if (maxConnectionsPerHosts == 0) { + return -1; + } + return signSafeMod(random.nextInt(), maxConnectionsPerHosts); + } + public CompletableFuture getConnection(final InetSocketAddress address) { - return getConnection(address, address); + if (maxConnectionsPerHosts == 0) { + return getConnection(address, address, -1); + } + return getConnection(address, address, signSafeMod(random.nextInt(), maxConnectionsPerHosts)); } void closeAllConnections() { @@ -204,14 +214,12 @@ void closeAllConnections() { * @return a future that will produce the ClientCnx object */ public CompletableFuture getConnection(InetSocketAddress logicalAddress, - InetSocketAddress physicalAddress) { + InetSocketAddress physicalAddress, final int randomKey) { if (maxConnectionsPerHosts == 0) { // Disable pooling return createConnection(logicalAddress, physicalAddress, -1); } - final int randomKey = signSafeMod(random.nextInt(), maxConnectionsPerHosts); - final ConcurrentMap> innerPool = pool.computeIfAbsent(logicalAddress, a -> new ConcurrentHashMap<>()); CompletableFuture completableFuture = innerPool diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java index 0db2a8e0ab9f5..79cd8db4b99bb 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java +++ 
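// [Illustrative sketch, not part of this patch] Each producer/consumer handler now draws one
// random key up front (genRandomKeyToSelectCon) and reuses it on every reconnect, so it keeps
// selecting the same slot of the per-host connection pool instead of hopping between pooled
// connections. A minimal sketch of that slot selection; the helper names are hypothetical:
import java.util.Random;

final class PooledSlotSelectionSketch {
    private static final Random RANDOM = new Random();

    // Maps any int (including negative values) onto [0, modulo).
    static int signSafeMod(int value, int modulo) {
        int result = value % modulo;
        return result < 0 ? result + modulo : result;
    }

    // Drawn once per handler and kept for its whole lifetime; -1 means pooling is disabled.
    static int genRandomKeyToSelectConnection(int maxConnectionsPerHost) {
        if (maxConnectionsPerHost == 0) {
            return -1;
        }
        return signSafeMod(RANDOM.nextInt(), maxConnectionsPerHost);
    }

    public static void main(String[] args) {
        int key = genRandomKeyToSelectConnection(5);
        System.out.println("handler will always use pool slot " + key);
    }
}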
b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java @@ -64,6 +64,7 @@ import org.apache.pulsar.common.api.proto.CommandSubscribe; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue; import org.slf4j.Logger; @@ -88,6 +89,11 @@ public abstract class ConsumerBase extends HandlerState implements Consumer>> pendingReceives; protected final int maxReceiverQueueSize; private volatile int currentReceiverQueueSize; + + protected static final AtomicIntegerFieldUpdater MESSAGE_LISTENER_QUEUE_SIZE_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(ConsumerBase.class, "messageListenerQueueSize"); + protected volatile int messageListenerQueueSize = 0; + protected static final AtomicIntegerFieldUpdater CURRENT_RECEIVER_QUEUE_SIZE_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConsumerBase.class, "currentReceiverQueueSize"); protected final Schema schema; @@ -188,6 +194,10 @@ protected ConsumerBase(PulsarClientImpl client, String topic, ConsumerConfigurat initReceiverQueueSize(); } + protected UnAckedMessageTracker getUnAckedMessageTracker() { + return unAckedMessageTracker; + } + protected void triggerBatchReceiveTimeoutTask() { if (!hasBatchReceiveTimeout() && batchReceivePolicy.getTimeoutMs() > 0) { batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, @@ -439,6 +449,7 @@ public void acknowledge(Messages messages) throws PulsarClientException { @Override public void reconsumeLater(Message message, long delayTime, TimeUnit unit) throws PulsarClientException { + checkArgument(delayTime >= 0, "The delay time must not be negative."); reconsumeLater(message, null, delayTime, unit); } @@ -553,6 +564,7 @@ public CompletableFuture reconsumeLaterAsync(Message message, long dela @Override public CompletableFuture reconsumeLaterAsync( Message message, Map customProperties, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); if (!conf.isRetryEnable()) { return FutureUtil.failedFuture(new PulsarClientException(RECONSUME_LATER_ERROR_MSG)); } @@ -589,12 +601,14 @@ public CompletableFuture acknowledgeCumulativeAsync(Message message) { @Override public CompletableFuture reconsumeLaterCumulativeAsync(Message message, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); return reconsumeLaterCumulativeAsync(message, null, delayTime, unit); } @Override public CompletableFuture reconsumeLaterCumulativeAsync( Message message, Map customProperties, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); if (!conf.isRetryEnable()) { return FutureUtil.failedFuture(new PulsarClientException(RECONSUME_LATER_ERROR_MSG)); } @@ -1105,6 +1119,7 @@ private void triggerListener() { // Trigger the notification on the message listener in a separate thread to avoid blocking the // internal pinned executor thread while the message processing happens final Message finalMsg = msg; + MESSAGE_LISTENER_QUEUE_SIZE_UPDATER.incrementAndGet(this); if (SubscriptionType.Key_Shared == conf.getSubscriptionType()) { executorProvider.getExecutor(peekMessageKey(msg)).execute(() -> callMessageListener(finalMsg)); @@ -1148,6 +1163,8 @@ protected void callMessageListener(Message 
msg) { } catch (Throwable t) { log.error("[{}][{}] Message listener error in processing message: {}", topic, subscription, msg.getMessageId(), t); + } finally { + MESSAGE_LISTENER_QUEUE_SIZE_UPDATER.decrementAndGet(this); } } @@ -1254,6 +1271,10 @@ protected boolean isValidConsumerEpoch(MessageImpl message) { return true; } + protected boolean isSingleMessageAcked(BitSetRecyclable ackBitSet, int batchIndex) { + return ackBitSet != null && !ackBitSet.get(batchIndex); + } + public boolean hasBatchReceiveTimeout() { return batchReceiveTimeout != null; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java index f644c6a18398f..895273a90c050 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java @@ -497,6 +497,7 @@ public ConsumerBuilder enableBatchIndexAcknowledgment(boolean batchIndexAckno @Override public ConsumerBuilder expireTimeOfIncompleteChunkedMessage(long duration, TimeUnit unit) { + checkArgument(duration >= 0, "expired time of incomplete chunk message must not be negative"); conf.setExpireTimeOfIncompleteChunkedMessageMillis(unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java index cc01609319698..bfe8bd54849ea 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java @@ -21,6 +21,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; +import static org.apache.pulsar.common.protocol.Commands.serializeWithSize; import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ComparisonChain; @@ -32,9 +33,11 @@ import io.netty.util.Recycler.Handle; import io.netty.util.ReferenceCountUtil; import io.netty.util.Timeout; +import io.netty.util.concurrent.FastThreadLocal; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Collections; import java.util.HashMap; @@ -66,6 +69,7 @@ import lombok.AccessLevel; import lombok.Getter; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Triple; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.ConsumerCryptoFailureAction; import org.apache.pulsar.client.api.DeadLetterPolicy; @@ -93,7 +97,9 @@ import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.EncryptionContext; import org.apache.pulsar.common.api.EncryptionContext.EncryptionKey; +import org.apache.pulsar.common.api.proto.BaseCommand; import org.apache.pulsar.common.api.proto.BrokerEntryMetadata; +import org.apache.pulsar.common.api.proto.CommandAck; import org.apache.pulsar.common.api.proto.CommandAck.AckType; import org.apache.pulsar.common.api.proto.CommandAck.ValidationError; import org.apache.pulsar.common.api.proto.CommandMessage; @@ -116,6 +122,7 @@ import 
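// [Illustrative sketch, not part of this patch] The listener-queue counter above uses an
// AtomicIntegerFieldUpdater over a volatile int rather than a separate AtomicInteger object,
// which avoids one extra allocation per consumer. The same counter pattern in isolation; the
// class and method names are hypothetical:
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

final class ListenerQueueCounterSketch {
    private static final AtomicIntegerFieldUpdater<ListenerQueueCounterSketch> QUEUE_SIZE =
            AtomicIntegerFieldUpdater.newUpdater(ListenerQueueCounterSketch.class, "queueSize");
    private volatile int queueSize = 0;

    void onMessageHandedToListener() {
        QUEUE_SIZE.incrementAndGet(this);       // message scheduled onto the listener executor
    }

    void onListenerFinished() {
        QUEUE_SIZE.decrementAndGet(this);       // always decrement, even if the listener threw
    }

    int pendingForListener() {
        return QUEUE_SIZE.get(this);
    }
}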
org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.SafeCollectionUtils; import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.common.util.collections.ConcurrentBitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue; import org.slf4j.Logger; @@ -412,7 +419,7 @@ public CompletableFuture unsubscribeAsync() { unsubscribeFuture.completeExceptionally( PulsarClientException.wrap(e.getCause(), String.format("Failed to unsubscribe the subscription %s of topic %s", - topicName.toString(), subscription))); + subscription, topicName.toString()))); return null; }); } else { @@ -605,6 +612,7 @@ protected CompletableFuture doReconsumeLater(Message message, AckType a retryLetterProducer = client.newProducer(Schema.AUTO_PRODUCE_BYTES(schema)) .topic(this.deadLetterPolicy.getRetryLetterTopic()) .enableBatching(false) + .enableChunking(true) .blockIfQueueFull(false) .create(); } @@ -753,16 +761,17 @@ public void negativeAcknowledge(Message message) { } @Override - public void connectionOpened(final ClientCnx cnx) { + public CompletableFuture connectionOpened(final ClientCnx cnx) { previousExceptions.clear(); - if (getState() == State.Closing || getState() == State.Closed) { + final State state = getState(); + if (state == State.Closing || state == State.Closed) { setState(State.Closed); closeConsumerTasks(); deregisterFromClientCnx(); client.cleanupConsumer(this); clearReceiverQueue(); - return; + return CompletableFuture.completedFuture(null); } log.info("[{}][{}] Subscribing to topic on cnx {}, consumerId {}", @@ -816,6 +825,7 @@ public void connectionOpened(final ClientCnx cnx) { && startMessageId.equals(initialStartMessageId)) ? 
startMessageRollbackDurationInSec : 0; // synchronized this, because redeliverUnAckMessage eliminate the epoch inconsistency between them + final CompletableFuture future = new CompletableFuture<>(); synchronized (this) { setClientCnx(cnx); ByteBuf request = Commands.newSubscribe(topic, subscription, consumerId, requestId, getSubType(), @@ -837,6 +847,7 @@ public void connectionOpened(final ClientCnx cnx) { deregisterFromClientCnx(); client.cleanupConsumer(this); cnx.channel().close(); + future.complete(null); return; } } @@ -849,12 +860,14 @@ public void connectionOpened(final ClientCnx cnx) { if (!(firstTimeConnect && hasParentConsumer) && getCurrentReceiverQueueSize() != 0) { increaseAvailablePermits(cnx, getCurrentReceiverQueueSize()); } + future.complete(null); }).exceptionally((e) -> { deregisterFromClientCnx(); if (getState() == State.Closing || getState() == State.Closed) { // Consumer was closed while reconnecting, close the connection to make sure the broker // drops the consumer on its side cnx.channel().close(); + future.complete(null); return null; } log.warn("[{}][{}] Failed to subscribe to topic on {}", topic, @@ -872,7 +885,7 @@ public void connectionOpened(final ClientCnx cnx) { if (e.getCause() instanceof PulsarClientException && PulsarClientException.isRetriableError(e.getCause()) && System.currentTimeMillis() < SUBSCRIBE_DEADLINE_UPDATER.get(ConsumerImpl.this)) { - reconnectLater(e.getCause()); + future.completeExceptionally(e.getCause()); } else if (!subscribeFuture.isDone()) { // unable to create new consumer, fail operation setState(State.Failed); @@ -896,11 +909,16 @@ public void connectionOpened(final ClientCnx cnx) { topic, subscription, cnx.channel().remoteAddress()); } else { // consumer was subscribed and connected but we got some error, keep trying - reconnectLater(e.getCause()); + future.completeExceptionally(e.getCause()); + } + + if (!future.isDone()) { + future.complete(null); } return null; }); } + return future; } protected void consumerIsReconnectedToBroker(ClientCnx cnx, int currentQueueSize) { @@ -984,7 +1002,7 @@ public void connectionFailed(PulsarClientException exception) { setState(State.Failed); if (nonRetriableError) { log.info("[{}] Consumer creation failed for consumer {} with unretriableError {}", - topic, consumerId, exception); + topic, consumerId, exception.getMessage()); } else { log.info("[{}] Consumer creation failed for consumer {} after timeout", topic, consumerId); } @@ -1163,7 +1181,7 @@ protected MessageImpl newSingleMessage(final int index, return null; } - if (ackBitSet != null && !ackBitSet.get(index)) { + if (isSingleMessageAcked(ackBitSet, index)) { return null; } @@ -1401,7 +1419,9 @@ void messageReceived(CommandMessage cmdMessage, ByteBuf headersAndPayload, Clien private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata msgMetadata, MessageIdImpl msgId, MessageIdData messageId, ClientCnx cnx) { - + if (msgMetadata.getChunkId() != (msgMetadata.getNumChunksFromMsg() - 1)) { + increaseAvailablePermits(cnx); + } // Lazy task scheduling to expire incomplete chunk message if (expireTimeOfIncompleteChunkedMessageMillis > 0 && expireChunkMessageTaskScheduled.compareAndSet(false, true)) { @@ -1415,7 +1435,48 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m ChunkedMessageCtx chunkedMsgCtx = chunkedMessagesMap.get(msgMetadata.getUuid()); - if (msgMetadata.getChunkId() == 0 && chunkedMsgCtx == null) { + if (msgMetadata.getChunkId() == 0) { + if (chunkedMsgCtx != null) { + // Handle 
ack hole case when receive duplicated chunks. + // There are two situation that receives chunks with the same sequence ID and chunk ID. + // Situation 1 - Message redeliver: + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:3 + // In this case, chunk-3 and chunk-4 have the same msgID with chunk-1 and chunk-2. + // This may be caused by message redeliver, we can't ack any chunk in this case here. + // Situation 2 - Corrupted chunk message + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 0, msgID: 1:3 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:4 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:5 + // In this case, all the chunks with different msgIDs and are persistent in the topic. + // But Chunk-1 and Chunk-2 belong to a corrupted chunk message that must be skipped since + // they will not be delivered to end users. So we should ack them here to avoid ack hole. + boolean isCorruptedChunkMessageDetected = Arrays.stream(chunkedMsgCtx.chunkedMessageIds) + .noneMatch(messageId1 -> messageId1 != null && messageId1.ledgerId == messageId.getLedgerId() + && messageId1.entryId == messageId.getEntryId()); + if (isCorruptedChunkMessageDetected) { + Arrays.stream(chunkedMsgCtx.chunkedMessageIds).forEach(messageId1 -> { + if (messageId1 != null) { + doAcknowledge(messageId1, AckType.Individual, Collections.emptyMap(), null); + } + }); + } + // The first chunk of a new chunked-message received before receiving other chunks of previous + // chunked-message + // so, remove previous chunked-message from map and release buffer + if (chunkedMsgCtx.chunkedMsgBuffer != null) { + ReferenceCountUtil.safeRelease(chunkedMsgCtx.chunkedMsgBuffer); + } + chunkedMsgCtx.recycle(); + chunkedMessagesMap.remove(msgMetadata.getUuid()); + } pendingChunkedMessageCount++; if (maxPendingChunkedMessage > 0 && pendingChunkedMessageCount > maxPendingChunkedMessage) { removeOldestPendingChunkedMessage(); @@ -1431,8 +1492,34 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m // discard message if chunk is out-of-order if (chunkedMsgCtx == null || chunkedMsgCtx.chunkedMsgBuffer == null || msgMetadata.getChunkId() != (chunkedMsgCtx.lastChunkedMessageId + 1)) { + // Filter and ack duplicated chunks instead of discard ctx. + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 2, msgID: 1:3 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:4 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:5 + // Chunk-6 sequence ID: 0, chunk ID: 3, msgID: 1:6 + // We should filter and ack chunk-4 and chunk-5. + if (chunkedMsgCtx != null && msgMetadata.getChunkId() <= chunkedMsgCtx.lastChunkedMessageId) { + log.warn("[{}] Receive a duplicated chunk message with messageId [{}], last-chunk-Id [{}], " + + "chunkId [{}], sequenceId [{}]", + msgMetadata.getProducerName(), msgId, chunkedMsgCtx.lastChunkedMessageId, + msgMetadata.getChunkId(), msgMetadata.getSequenceId()); + compressedPayload.release(); + // Just like the above logic of receiving the first chunk again. We only ack this chunk in the message + // duplication case. 
+ boolean isDuplicatedChunk = Arrays.stream(chunkedMsgCtx.chunkedMessageIds) + .noneMatch(messageId1 -> messageId1 != null && messageId1.ledgerId == messageId.getLedgerId() + && messageId1.entryId == messageId.getEntryId()); + if (isDuplicatedChunk) { + doAcknowledge(msgId, AckType.Individual, Collections.emptyMap(), null); + } + return null; + } // means we lost the first chunk: should never happen - log.info("Received unexpected chunk messageId {}, last-chunk-id{}, chunkId = {}", msgId, + log.info("[{}] [{}] Received unexpected chunk messageId {}, last-chunk-id = {}, chunkId = {}", topic, + subscription, msgId, (chunkedMsgCtx != null ? chunkedMsgCtx.lastChunkedMessageId : null), msgMetadata.getChunkId()); if (chunkedMsgCtx != null) { if (chunkedMsgCtx.chunkedMsgBuffer != null) { @@ -1442,7 +1529,6 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m } chunkedMessagesMap.remove(msgMetadata.getUuid()); compressedPayload.release(); - increaseAvailablePermits(cnx); if (expireTimeOfIncompleteChunkedMessageMillis > 0 && System.currentTimeMillis() > (msgMetadata.getPublishTime() + expireTimeOfIncompleteChunkedMessageMillis)) { @@ -1461,7 +1547,6 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m // if final chunk is not received yet then release payload and return if (msgMetadata.getChunkId() != (msgMetadata.getNumChunksFromMsg() - 1)) { compressedPayload.release(); - increaseAvailablePermits(cnx); return null; } @@ -1546,7 +1631,14 @@ void receiveIndividualMessagesFromBatch(BrokerEntryMetadata brokerEntryMetadata, singleMessageMetadata, uncompressedPayload, batchMessage, schema, true, ackBitSet, ackSetInMessageId, redeliveryCount, consumerEpoch); if (message == null) { - skippedMessages++; + // If it is not in ackBitSet, it means Broker does not want to deliver it to the client, and + // did not decrease the permits in the broker-side. + // So do not acquire more permits for this message. + // Why not skip this single message in the first line of for-loop block? We need call + // "newSingleMessage" to move "payload.readerIndex" to a correct value to get the correct data. 
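// [Illustrative sketch, not part of this patch] The chunk handling above decides whether a
// re-received chunk is a redelivery (its ledger/entry is already recorded for the chunked message
// being assembled) or a stray duplicate that can be acknowledged immediately. The membership test
// in isolation, with hypothetical types standing in for the Pulsar message-ID classes:
import java.util.Arrays;

final class DuplicateChunkCheckSketch {
    record EntryPos(long ledgerId, long entryId) { }

    // True when the incoming chunk's position is NOT among the chunks already collected,
    // i.e. it does not belong to the chunked message currently being assembled.
    static boolean isStrayDuplicate(EntryPos[] collectedChunks, long ledgerId, long entryId) {
        return Arrays.stream(collectedChunks)
                .noneMatch(pos -> pos != null
                        && pos.ledgerId() == ledgerId
                        && pos.entryId() == entryId);
    }
}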
+ if (!isSingleMessageAcked(ackBitSet, i)) { + skippedMessages++; + } continue; } if (possibleToDeadLetter != null) { @@ -2076,6 +2168,8 @@ private void initDeadLetterProducerIfNeeded() { .initialSubscriptionName(this.deadLetterPolicy.getInitialSubscriptionName()) .topic(this.deadLetterPolicy.getDeadLetterTopic()) .blockIfQueueFull(false) + .enableBatching(false) + .enableChunking(true) .createAsync(); } } finally { @@ -2151,9 +2245,18 @@ private CompletableFuture seekAsyncInternal(long requestId, ByteBuf seek, final CompletableFuture seekFuture = new CompletableFuture<>(); ClientCnx cnx = cnx(); + if (!duringSeek.compareAndSet(false, true)) { + final String message = String.format( + "[%s][%s] attempting to seek operation that is already in progress (seek by %s)", + topic, subscription, seekBy); + log.warn("[{}][{}] Attempting to seek operation that is already in progress, cancelling {}", + topic, subscription, seekBy); + seekFuture.completeExceptionally(new IllegalStateException(message)); + return seekFuture; + } + MessageIdAdv originSeekMessageId = seekMessageId; seekMessageId = (MessageIdAdv) seekId; - duringSeek.set(true); log.info("[{}][{}] Seeking subscription to {}", topic, subscription, seekBy); cnx.sendRequestWithId(seek, requestId).thenRun(() -> { @@ -2228,6 +2331,10 @@ public boolean hasMessageAvailable() throws PulsarClientException { public CompletableFuture hasMessageAvailableAsync() { final CompletableFuture booleanFuture = new CompletableFuture<>(); + if (incomingMessages != null && !incomingMessages.isEmpty()) { + return CompletableFuture.completedFuture(true); + } + // we haven't read yet. use startMessageId for comparison if (lastDequeuedMessageId == MessageId.earliest) { // if we are starting from latest, we should seek to the actual last message first. 
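// [Illustrative sketch, not part of this patch] The seek change above rejects a second seek while
// one is still in flight by claiming an AtomicBoolean with compareAndSet before sending the
// command, instead of setting the flag afterwards. The same single-flight guard in isolation,
// simplified to a synchronous stand-in (the real code releases the flag when the broker replies):
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

final class SingleFlightSeekSketch {
    private final AtomicBoolean duringSeek = new AtomicBoolean(false);

    CompletableFuture<Void> seek(Runnable sendSeekCommand) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        if (!duringSeek.compareAndSet(false, true)) {
            future.completeExceptionally(
                    new IllegalStateException("a seek operation is already in progress"));
            return future;
        }
        try {
            sendSeekCommand.run();              // stand-in for sending the wire command
            future.complete(null);
        } catch (Exception e) {
            future.completeExceptionally(e);
        } finally {
            duringSeek.set(false);              // release the guard once this attempt is settled
        }
        return future;
    }
}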
@@ -2345,7 +2452,7 @@ public CompletableFuture getLastMessageIdAsync() { @Override public CompletableFuture> getLastMessageIdsAsync() { return getLastMessageIdAsync() - .thenApply(msgId -> Collections.singletonList(TopicMessageId.create(topic, msgId))); + .thenApply(msgId -> Collections.singletonList(new TopicMessageIdImpl(topic, (MessageIdAdv) msgId))); } public CompletableFuture internalGetLastMessageIdAsync() { @@ -2427,9 +2534,9 @@ private void internalGetLastMessageIdAsync(final Backoff backoff, return; } + log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms", + topic, getHandlerName(), nextDelay); ((ScheduledExecutorService) client.getScheduledExecutorProvider().getExecutor()).schedule(() -> { - log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms", - topic, getHandlerName(), nextDelay); remainingTime.addAndGet(-nextDelay); internalGetLastMessageIdAsync(backoff, remainingTime, future); }, nextDelay, TimeUnit.MILLISECONDS); @@ -2574,10 +2681,6 @@ void deregisterFromClientCnx() { setClientCnx(null); } - void reconnectLater(Throwable exception) { - this.connectionHandler.reconnectLater(exception); - } - void grabCnx() { this.connectionHandler.grabCnx(); } @@ -2687,7 +2790,7 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me final MessageIdAdv messageIdAdv = (MessageIdAdv) messageId; final long ledgerId = messageIdAdv.getLedgerId(); final long entryId = messageIdAdv.getEntryId(); - final ByteBuf cmd; + final List cmdList; if (MessageIdAdvUtils.isBatch(messageIdAdv)) { BitSetRecyclable bitSetRecyclable = BitSetRecyclable.create(); bitSetRecyclable.set(0, messageIdAdv.getBatchSize()); @@ -2697,12 +2800,37 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me } else { bitSetRecyclable.clear(messageIdAdv.getBatchIndex()); } - cmd = Commands.newAck(consumerId, ledgerId, entryId, bitSetRecyclable, ackType, validationError, properties, - txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId, messageIdAdv.getBatchSize()); + cmdList = Collections.singletonList(Commands.newAck(consumerId, ledgerId, entryId, bitSetRecyclable, + ackType, validationError, properties, txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId, + messageIdAdv.getBatchSize())); bitSetRecyclable.recycle(); } else { - cmd = Commands.newAck(consumerId, ledgerId, entryId, null, ackType, validationError, properties, - txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId); + MessageIdImpl[] chunkMsgIds = this.unAckedChunkedMessageIdSequenceMap.remove(messageIdAdv); + // cumulative ack chunk by the last messageId + if (chunkMsgIds == null || ackType == AckType.Cumulative) { + cmdList = Collections.singletonList(Commands.newAck(consumerId, ledgerId, entryId, null, ackType, + validationError, properties, txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId)); + } else { + if (Commands.peerSupportsMultiMessageAcknowledgment( + getClientCnx().getRemoteEndpointProtocolVersion())) { + List> entriesToAck = + new ArrayList<>(chunkMsgIds.length); + for (MessageIdImpl cMsgId : chunkMsgIds) { + if (cMsgId != null && chunkMsgIds.length > 1) { + entriesToAck.add(Triple.of(cMsgId.getLedgerId(), cMsgId.getEntryId(), null)); + } + } + cmdList = Collections.singletonList( + newMultiTransactionMessageAck(consumerId, txnID, entriesToAck, requestId)); + } else { + cmdList = new ArrayList<>(); + for (MessageIdImpl cMsgId : chunkMsgIds) { + cmdList.add(Commands.newAck(consumerId, cMsgId.ledgerId, 
cMsgId.entryId, null, ackType, + validationError, properties, + txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId)); + } + } + } } if (ackType == AckType.Cumulative) { @@ -2716,8 +2844,55 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me .ConnectException("Failed to ack message [" + messageId + "] " + "for transaction [" + txnID + "] due to consumer connect fail, consumer state: " + getState())); } else { - return cnx.newAckForReceipt(cmd, requestId); + List> completableFutures = new LinkedList<>(); + cmdList.forEach(cmd -> completableFutures.add(cnx.newAckForReceipt(cmd, requestId))); + return FutureUtil.waitForAll(completableFutures); + } + } + + private ByteBuf newMultiTransactionMessageAck(long consumerId, TxnID txnID, + List> entries, + long requestID) { + BaseCommand cmd = newMultiMessageAckCommon(entries); + cmd.getAck() + .setConsumerId(consumerId) + .setAckType(AckType.Individual) + .setTxnidLeastBits(txnID.getLeastSigBits()) + .setTxnidMostBits(txnID.getMostSigBits()) + .setRequestId(requestID); + return serializeWithSize(cmd); + } + + private static final FastThreadLocal LOCAL_BASE_COMMAND = new FastThreadLocal() { + @Override + protected BaseCommand initialValue() throws Exception { + return new BaseCommand(); + } + }; + + private static BaseCommand newMultiMessageAckCommon(List> entries) { + BaseCommand cmd = LOCAL_BASE_COMMAND.get() + .clear() + .setType(BaseCommand.Type.ACK); + CommandAck ack = cmd.setAck(); + int entriesCount = entries.size(); + for (int i = 0; i < entriesCount; i++) { + long ledgerId = entries.get(i).getLeft(); + long entryId = entries.get(i).getMiddle(); + ConcurrentBitSetRecyclable bitSet = entries.get(i).getRight(); + MessageIdData msgId = ack.addMessageId() + .setLedgerId(ledgerId) + .setEntryId(entryId); + if (bitSet != null) { + long[] ackSet = bitSet.toLongArray(); + for (int j = 0; j < ackSet.length; j++) { + msgId.addAckSet(ackSet[j]); + } + bitSet.recycle(); + } } + + return cmd; } private CompletableFuture doTransactionAcknowledgeForResponse(List messageIds, AckType ackType, diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java index 8630bedc65f31..3a47ddc5d4b08 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java @@ -238,7 +238,11 @@ public void updateCumulativeStats(ConsumerStats stats) { @Override public Integer getMsgNumInReceiverQueue() { if (consumer instanceof ConsumerBase) { - return ((ConsumerBase) consumer).incomingMessages.size(); + ConsumerBase consumerBase = (ConsumerBase) consumer; + if (consumerBase.listener != null){ + return ConsumerBase.MESSAGE_LISTENER_QUEUE_SIZE_UPDATER.get(consumerBase); + } + return consumerBase.incomingMessages.size(); } return null; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java index 080d328e3f02c..9d30108ec7a1d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java @@ -236,6 +236,7 @@ public ControlledClusterFailoverBuilder urlProviderHeader(Map he @Override public ControlledClusterFailoverBuilder 
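// [Illustrative sketch, not part of this patch] newMultiMessageAckCommon() above reuses a single
// per-thread BaseCommand via Netty's FastThreadLocal and clears it before each use, instead of
// allocating a fresh command object for every ack. The same reuse pattern with a generic scratch
// object (the StringBuilder is only a stand-in for the protocol command):
import io.netty.util.concurrent.FastThreadLocal;

final class PerThreadScratchSketch {
    private static final FastThreadLocal<StringBuilder> SCRATCH = new FastThreadLocal<StringBuilder>() {
        @Override
        protected StringBuilder initialValue() {
            return new StringBuilder(256);
        }
    };

    static String render(long ledgerId, long entryId) {
        StringBuilder sb = SCRATCH.get();
        sb.setLength(0);                         // "clear()" the reused object before each use
        return sb.append(ledgerId).append(':').append(entryId).toString();
    }
}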
checkInterval(long interval, @NonNull TimeUnit timeUnit) { + checkArgument(interval >= 0, "The check interval time must not be negative."); this.interval = timeUnit.toMillis(interval); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java index 7969ce402363f..e33efabcc9e0e 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java @@ -19,7 +19,6 @@ package org.apache.pulsar.client.impl; import io.netty.channel.EventLoopGroup; -import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import java.nio.ByteBuffer; @@ -178,18 +177,14 @@ public CompletableFuture> getSchema(TopicName topicName, by } httpClient.get(path, GetSchemaResponse.class).thenAccept(response -> { if (response.getType() == SchemaType.KEY_VALUE) { - try { - SchemaData data = SchemaData - .builder() - .data(SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema( - response.getData().getBytes(StandardCharsets.UTF_8))) - .type(response.getType()) - .props(response.getProperties()) - .build(); - future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, data))); - } catch (IOException err) { - future.completeExceptionally(err); - } + SchemaData data = SchemaData + .builder() + .data(SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema( + response.getData().getBytes(StandardCharsets.UTF_8))) + .type(response.getType()) + .props(response.getProperties()) + .build(); + future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, data))); } else { future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, response))); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java index 935e3fad2b59d..c15821c054325 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java @@ -22,7 +22,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import lombok.extern.slf4j.Slf4j; +@Slf4j public class MemoryLimitController { private final long memoryLimit; @@ -46,11 +48,19 @@ public MemoryLimitController(long memoryLimitBytes, long triggerThreshold, Runna } public void forceReserveMemory(long size) { + checkPositive(size); + if (size == 0) { + return; + } long newUsage = currentUsage.addAndGet(size); checkTrigger(newUsage - size, newUsage); } public boolean tryReserveMemory(long size) { + checkPositive(size); + if (size == 0) { + return true; + } while (true) { long current = currentUsage.get(); long newUsage = current + size; @@ -68,6 +78,15 @@ public boolean tryReserveMemory(long size) { } } + private static void checkPositive(long memorySize) { + if (memorySize < 0) { + String errorMsg = String.format("Try to reserve/release memory failed, the param memorySize" + + " is a negative value: %s", memorySize); + log.error(errorMsg); + throw new IllegalArgumentException(errorMsg); + } + } + private void checkTrigger(long prevUsage, long newUsage) { if (newUsage >= triggerThreshold && prevUsage < triggerThreshold && trigger != null) { if (triggerRunning.compareAndSet(false, true)) { @@ -81,6 
+100,10 @@ private void checkTrigger(long prevUsage, long newUsage) { } public void reserveMemory(long size) throws InterruptedException { + checkPositive(size); + if (size == 0) { + return; + } if (!tryReserveMemory(size)) { mutex.lock(); try { @@ -94,6 +117,10 @@ public void reserveMemory(long size) throws InterruptedException { } public void releaseMemory(long size) { + checkPositive(size); + if (size == 0) { + return; + } long newUsage = currentUsage.addAndGet(-size); if (newUsage + size > memoryLimit && newUsage <= memoryLimit) { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java index c8b18524ec052..a0d1446ba3d55 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java @@ -64,6 +64,9 @@ static boolean isBatch(MessageIdAdv msgId) { } static MessageIdAdv discardBatch(MessageId messageId) { + if (messageId instanceof ChunkMessageIdImpl) { + return (MessageIdAdv) messageId; + } MessageIdAdv msgId = (MessageIdAdv) messageId; return new MessageIdImpl(msgId.getLedgerId(), msgId.getEntryId(), msgId.getPartitionIndex()); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdImpl.java index 83ee762578390..8cffba44dc5ca 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdImpl.java @@ -128,7 +128,7 @@ public static MessageId fromByteArrayWithTopic(byte[] data, TopicName topicName) throw new IOException(e); } - MessageId messageId; + MessageIdAdv messageId; if (idData.hasBatchIndex()) { if (idData.hasBatchSize()) { messageId = new BatchMessageIdImpl(idData.getLedgerId(), idData.getEntryId(), idData.getPartition(), @@ -143,7 +143,7 @@ public static MessageId fromByteArrayWithTopic(byte[] data, TopicName topicName) } if (idData.getPartition() > -1 && topicName != null) { messageId = new TopicMessageIdImpl( - topicName.getPartition(idData.getPartition()).toString(), topicName.toString(), messageId); + topicName.getPartition(idData.getPartition()).toString(), messageId); } return messageId; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java index ef0345de919ea..fd65a9371d47b 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java @@ -49,6 +49,7 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import javax.annotation.Nullable; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.BatchReceivePolicy; import org.apache.pulsar.client.api.Consumer; @@ -92,7 +93,7 @@ public class MultiTopicsConsumerImpl extends ConsumerBase { // sum of topicPartitions, simple topic has 1, partitioned topic equals to partition number. 
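Editor's note on the MemoryLimitController changes above: every reserve/release path now rejects negative sizes and short-circuits zero-sized requests before touching the usage counter. A trimmed, self-contained sketch of that guard together with a CAS-style reservation loop; field names are simplified and the exact over-limit policy of the real controller is not reproduced here:

```java
import java.util.concurrent.atomic.AtomicLong;

public class MemoryLimiterSketch {
    private final long memoryLimit;
    private final AtomicLong currentUsage = new AtomicLong();

    public MemoryLimiterSketch(long memoryLimitBytes) {
        this.memoryLimit = memoryLimitBytes;
    }

    private static void checkPositive(long size) {
        if (size < 0) {
            throw new IllegalArgumentException("memorySize is a negative value: " + size);
        }
    }

    public boolean tryReserveMemory(long size) {
        checkPositive(size);
        if (size == 0) {
            return true; // nothing to reserve; succeed without touching the counter
        }
        while (true) {
            long current = currentUsage.get();
            long newUsage = current + size;
            if (current > 0 && newUsage > memoryLimit) {
                return false; // simplified limit check for illustration only
            }
            if (currentUsage.compareAndSet(current, newUsage)) {
                return true;
            }
        }
    }

    public void releaseMemory(long size) {
        checkPositive(size);
        if (size == 0) {
            return;
        }
        currentUsage.addAndGet(-size);
    }
}
```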
AtomicInteger allTopicPartitionsNumber; - private boolean paused = false; + private volatile boolean paused = false; private final Object pauseMutex = new Object(); // timeout related to auto check and subscribe partition increasement private volatile Timeout partitionsAutoUpdateTimeout = null; @@ -101,7 +102,8 @@ public class MultiTopicsConsumerImpl extends ConsumerBase { private final MultiTopicConsumerStatsRecorderImpl stats; private final ConsumerConfigurationData internalConfig; - private volatile MessageIdAdv startMessageId; + private final MessageIdAdv startMessageId; + private volatile boolean duringSeek = false; private final long startMessageRollbackDurationInSec; MultiTopicsConsumerImpl(PulsarClientImpl client, ConsumerConfigurationData conf, ExecutorProvider executorProvider, CompletableFuture> subscribeFuture, Schema schema, @@ -235,6 +237,10 @@ private void startReceivingMessages(List> newConsumers) { } private void receiveMessageFromConsumer(ConsumerImpl consumer, boolean batchReceive) { + if (duringSeek) { + log.info("[{}] Pause receiving messages for topic {} due to seek", subscription, consumer.getTopic()); + return; + } CompletableFuture>> messagesFuture; if (batchReceive) { messagesFuture = consumer.batchReceiveAsync().thenApply(msgs -> ((MessagesImpl) msgs).getMessageList()); @@ -252,8 +258,12 @@ private void receiveMessageFromConsumer(ConsumerImpl consumer, boolean batchR } // Process the message, add to the queue and trigger listener or async callback messages.forEach(msg -> { - if (isValidConsumerEpoch((MessageImpl) msg)) { + final boolean skipDueToSeek = duringSeek; + if (isValidConsumerEpoch((MessageImpl) msg) && !skipDueToSeek) { messageReceived(consumer, msg); + } else if (skipDueToSeek) { + log.info("[{}] [{}] Skip processing message {} received during seek", topic, subscription, + msg.getMessageId()); } }); @@ -746,17 +756,12 @@ public void seek(Function function) throws PulsarClientException @Override public CompletableFuture seekAsync(Function function) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumer -> futures.add(consumer.seekAsync(function))); - unAckedMessageTracker.clear(); - incomingMessages.clear(); - resetIncomingMessageSize(); - return FutureUtil.waitForAll(futures); + return seekAllAsync(consumer -> consumer.seekAsync(function)); } @Override public CompletableFuture seekAsync(MessageId messageId) { - final Consumer internalConsumer; + final ConsumerImpl internalConsumer; if (messageId instanceof TopicMessageId) { TopicMessageId topicMessageId = (TopicMessageId) messageId; internalConsumer = consumers.get(topicMessageId.getOwnerTopic()); @@ -773,25 +778,46 @@ public CompletableFuture seekAsync(MessageId messageId) { ); } - final CompletableFuture seekFuture; if (internalConsumer == null) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumerImpl -> futures.add(consumerImpl.seekAsync(messageId))); - seekFuture = FutureUtil.waitForAll(futures); + return seekAllAsync(consumer -> consumer.seekAsync(messageId)); } else { - seekFuture = internalConsumer.seekAsync(messageId); + return seekAsyncInternal(Collections.singleton(internalConsumer), __ -> __.seekAsync(messageId)); } + } + + @Override + public CompletableFuture seekAsync(long timestamp) { + return seekAllAsync(consumer -> consumer.seekAsync(timestamp)); + } + + private CompletableFuture seekAsyncInternal(Collection> consumers, + Function, CompletableFuture> seekFunc) { + beforeSeek(); + final CompletableFuture 
future = new CompletableFuture<>(); + FutureUtil.waitForAll(consumers.stream().map(seekFunc).collect(Collectors.toList())) + .whenComplete((__, e) -> afterSeek(future, e)); + return future; + } + private CompletableFuture seekAllAsync(Function, CompletableFuture> seekFunc) { + return seekAsyncInternal(consumers.values(), seekFunc); + } + + private void beforeSeek() { + duringSeek = true; unAckedMessageTracker.clear(); clearIncomingMessages(); - return seekFuture; } - @Override - public CompletableFuture seekAsync(long timestamp) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumer -> futures.add(consumer.seekAsync(timestamp))); - return FutureUtil.waitForAll(futures); + private void afterSeek(CompletableFuture seekFuture, @Nullable Throwable throwable) { + duringSeek = false; + log.info("[{}] Resume receiving messages for {} since seek is done", subscription, consumers.keySet()); + startReceivingMessages(new ArrayList<>(consumers.values())); + if (throwable == null) { + seekFuture.complete(null); + } else { + seekFuture.completeExceptionally(throwable); + } } @Override @@ -900,7 +926,10 @@ private void removeTopic(String topic) { } } - // subscribe one more given topic + /*** + * Subscribe one more given topic. + * @param topicName topic name without the partition suffix. + */ public CompletableFuture subscribeAsync(String topicName, boolean createTopicIfDoesNotExist) { TopicName topicNameInstance = getTopicName(topicName); if (topicNameInstance == null) { @@ -996,13 +1025,13 @@ CompletableFuture subscribeAsync(String topicName, int numberPartitions) { private void subscribeTopicPartitions(CompletableFuture subscribeResult, String topicName, int numPartitions, boolean createIfDoesNotExist) { - client.preProcessSchemaBeforeSubscribe(client, schema, topicName).whenComplete((schema, cause) -> { - if (null == cause) { - doSubscribeTopicPartitions(schema, subscribeResult, topicName, numPartitions, createIfDoesNotExist); - } else { - subscribeResult.completeExceptionally(cause); - } - }); + client.preProcessSchemaBeforeSubscribe(client, schema, topicName) + .thenAccept(schema -> { + doSubscribeTopicPartitions(schema, subscribeResult, topicName, numPartitions, createIfDoesNotExist); + }).exceptionally(cause -> { + subscribeResult.completeExceptionally(cause); + return null; + }); } private void doSubscribeTopicPartitions(Schema schema, @@ -1059,29 +1088,28 @@ private void doSubscribeTopicPartitions(Schema schema, CompletableFuture> subFuture = new CompletableFuture<>(); - consumers.compute(topicName, (key, existingValue) -> { - if (existingValue != null) { - String errorMessage = String.format("[%s] Failed to subscribe for topic [%s] in topics consumer. " - + "Topic is already being subscribed for in other thread.", topic, topicName); - log.warn(errorMessage); - subscribeResult.completeExceptionally(new PulsarClientException(errorMessage)); - return existingValue; - } else { - internalConfig.setStartPaused(paused); - ConsumerImpl newConsumer = createInternalConsumer(internalConfig, topicName, - -1, subFuture, createIfDoesNotExist, schema); - - synchronized (pauseMutex) { + synchronized (pauseMutex) { + consumers.compute(topicName, (key, existingValue) -> { + if (existingValue != null) { + String errorMessage = + String.format("[%s] Failed to subscribe for topic [%s] in topics consumer. 
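Editor's note on the seek refactor above: every seek variant is now funneled through beforeSeek/afterSeek, so prefetching is paused (and local queues cleared) while the per-partition seeks are in flight and resumed once they all finish. A condensed illustration of that gate, using hypothetical names and no Pulsar types:

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public class SeekGateSketch<C> {
    private volatile boolean duringSeek = false;
    private final List<C> consumers;

    public SeekGateSketch(List<C> consumers) {
        this.consumers = consumers;
    }

    // Receive loops check this flag and stop prefetching while a seek is running.
    public boolean isSeeking() {
        return duringSeek;
    }

    public CompletableFuture<Void> seekAll(Function<C, CompletableFuture<Void>> seekFunc) {
        duringSeek = true;                       // beforeSeek(): stop prefetch, clear local state
        CompletableFuture<Void> result = new CompletableFuture<>();
        CompletableFuture
                .allOf(consumers.stream().map(seekFunc).toArray(CompletableFuture[]::new))
                .whenComplete((ignore, error) -> {
                    duringSeek = false;          // afterSeek(): resume receiving before completing
                    if (error == null) {
                        result.complete(null);
                    } else {
                        result.completeExceptionally(error);
                    }
                });
        return result;
    }
}
```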
" + + "Topic is already being subscribed for in other thread.", topic, topicName); + log.warn(errorMessage); + subscribeResult.completeExceptionally(new PulsarClientException(errorMessage)); + return existingValue; + } else { + internalConfig.setStartPaused(paused); + ConsumerImpl newConsumer = createInternalConsumer(internalConfig, topicName, + -1, subFuture, createIfDoesNotExist, schema); if (paused) { newConsumer.pause(); } else { newConsumer.resume(); } + return newConsumer; } - return newConsumer; - } - }); - + }); + } futureList = Collections.singletonList(subFuture); } @@ -1222,7 +1250,10 @@ public CompletableFuture unsubscribeAsync(String topicName) { return unsubscribeFuture; } - // Remove a consumer for a topic + /*** + * Remove a consumer for a topic. + * @param topicName topic name contains the partition suffix. + */ public CompletableFuture removeConsumerAsync(String topicName) { checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName); @@ -1408,7 +1439,7 @@ private CompletableFuture subscribeIncreasedTopicPartitions(String topicNa } if (log.isDebugEnabled()) { log.debug("[{}] create consumer {} for partitionName: {}", - topicName, newConsumer.getTopic(), partitionName); + topicName, newConsumer.getTopic(), partitionName); } return subFuture; }) diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java index f780edc95c136..bf7f1066173f6 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java @@ -436,7 +436,7 @@ public CompletableFuture onTopicsExtended(Collection topicsExtende }); // call interceptor with the metadata change onPartitionsChange(topic, currentPartitionNumber); - return null; + return future; } } else { log.error("[{}] not support shrink topic partitions. old: {}, new: {}", diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java index 12c7e4e4ba3c7..4d179f7d914c2 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java @@ -31,6 +31,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.pulsar.client.api.Consumer; @@ -50,11 +51,25 @@ public class PatternMultiTopicsConsumerImpl extends MultiTopicsConsumerImpl watcherFuture; + private final CompletableFuture watcherFuture = new CompletableFuture<>(); protected NamespaceName namespaceName; + + /** + * There is two task to re-check topic changes, the both tasks will not be take affects at the same time. + * 1. {@link #recheckTopicsChangeAfterReconnect}: it will be called after the {@link TopicListWatcher} reconnected + * if you enabled {@link TopicListWatcher}. This backoff used to do a retry if + * {@link #recheckTopicsChangeAfterReconnect} is failed. + * 2. {@link #run} A scheduled task to trigger re-check topic changes, it will be used if you disabled + * {@link TopicListWatcher}. 
+ */ + private final Backoff recheckPatternTaskBackoff; + private final AtomicInteger recheckPatternEpoch = new AtomicInteger(); private volatile Timeout recheckPatternTimeout = null; private volatile String topicsHash; + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, String topicsHash, PulsarClientImpl client, @@ -69,6 +84,11 @@ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, this.topicsPattern = topicsPattern; this.topicsHash = topicsHash; this.subscriptionMode = subscriptionMode; + this.recheckPatternTaskBackoff = new BackoffBuilder() + .setInitialTime(client.getConfiguration().getInitialBackoffIntervalNanos(), TimeUnit.NANOSECONDS) + .setMax(client.getConfiguration().getMaxBackoffIntervalNanos(), TimeUnit.NANOSECONDS) + .setMandatoryStop(0, TimeUnit.SECONDS) + .create(); if (this.namespaceName == null) { this.namespaceName = getNameSpaceFromPattern(topicsPattern); @@ -78,15 +98,16 @@ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, this.topicsChangeListener = new PatternTopicsChangedListener(); this.recheckPatternTimeout = client.timer() .newTimeout(this, Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); - this.watcherFuture = new CompletableFuture<>(); if (subscriptionMode == Mode.PERSISTENT) { long watcherId = client.newTopicListWatcherId(); new TopicListWatcher(topicsChangeListener, client, topicsPattern, watcherId, - namespaceName, topicsHash, watcherFuture); - watcherFuture.exceptionally(ex -> { - log.debug("Unable to create topic list watcher. Falling back to only polling for new topics", ex); - return null; - }); + namespaceName, topicsHash, watcherFuture, () -> recheckTopicsChangeAfterReconnect()); + watcherFuture + .thenAccept(__ -> recheckPatternTimeout.cancel()) + .exceptionally(ex -> { + log.warn("Unable to create topic list watcher. Falling back to only polling for new topics", ex); + return null; + }); } else { log.debug("Not creating topic list watcher for subscription mode {}", subscriptionMode); watcherFuture.complete(null); @@ -97,40 +118,75 @@ public static NamespaceName getNameSpaceFromPattern(Pattern pattern) { return TopicName.get(pattern.pattern()).getNamespaceObject(); } + /** + * This method will be called after the {@link TopicListWatcher} reconnected after enabled {@link TopicListWatcher}. + */ + private void recheckTopicsChangeAfterReconnect() { + // Skip if closed or the task has been cancelled. + if (getState() == State.Closing || getState() == State.Closed) { + return; + } + // Do check. + recheckTopicsChange().whenComplete((ignore, ex) -> { + if (ex != null) { + log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); + long delayMs = recheckPatternTaskBackoff.next(); + client.timer().newTimeout(timeout -> { + recheckTopicsChangeAfterReconnect(); + }, delayMs, TimeUnit.MILLISECONDS); + } else { + recheckPatternTaskBackoff.reset(); + } + }); + } + // TimerTask to recheck topics change, and trigger subscribe/unsubscribe based on the change. 
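Editor's note on recheckTopicsChangeAfterReconnect above: a failed recheck reschedules itself with an increasing delay and the backoff is reset on success. The shape of that retry loop, sketched with a plain ScheduledExecutorService standing in for the client timer and Backoff helper (single-threaded sketch, not thread-safe):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class RecheckWithBackoffSketch {
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
    private long nextDelayMs = 100;                  // stands in for Backoff.next()
    private static final long MAX_DELAY_MS = 60_000;

    public void recheckAfterReconnect(Supplier<CompletableFuture<Void>> recheck) {
        recheck.get().whenComplete((ignore, error) -> {
            if (error != null) {
                long delay = nextDelayMs;
                nextDelayMs = Math.min(nextDelayMs * 2, MAX_DELAY_MS);   // grow the delay on failure
                timer.schedule(() -> recheckAfterReconnect(recheck), delay, TimeUnit.MILLISECONDS);
            } else {
                nextDelayMs = 100;                                       // Backoff.reset() on success
            }
        });
    }
}
```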
@Override public void run(Timeout timeout) throws Exception { if (timeout.isCancelled()) { return; } - client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, topicsPattern.pattern(), topicsHash) - .thenCompose(getTopicsResult -> { + recheckTopicsChange().exceptionally(ex -> { + log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); + return null; + }).thenAccept(__ -> { + // schedule the next re-check task + this.recheckPatternTimeout = client.timer() + .newTimeout(PatternMultiTopicsConsumerImpl.this, + Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); + }); + } - if (log.isDebugEnabled()) { - log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", - namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), - getTopicsResult.isFiltered()); - getTopicsResult.getTopics().forEach(topicName -> - log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); - } + private CompletableFuture recheckTopicsChange() { + String pattern = topicsPattern.pattern(); + final int epoch = recheckPatternEpoch.incrementAndGet(); + return client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, pattern, topicsHash) + .thenCompose(getTopicsResult -> { + // If "recheckTopicsChange" has been called more than one times, only make the last one take affects. + // Use "synchronized (recheckPatternTaskBackoff)" instead of + // `synchronized(PatternMultiTopicsConsumerImpl.this)` to avoid locking in a wider range. + synchronized (recheckPatternTaskBackoff) { + if (recheckPatternEpoch.get() > epoch) { + return CompletableFuture.completedFuture(null); + } + if (log.isDebugEnabled()) { + log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", + namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), + getTopicsResult.isFiltered()); + getTopicsResult.getTopics().forEach(topicName -> + log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); + } - final List oldTopics = new ArrayList<>(getPartitionedTopics()); - for (String partition : getPartitions()) { - TopicName topicName = TopicName.get(partition); - if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { - oldTopics.add(partition); + final List oldTopics = new ArrayList<>(getPartitionedTopics()); + for (String partition : getPartitions()) { + TopicName topicName = TopicName.get(partition); + if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { + oldTopics.add(partition); + } } + return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, + topicsChangeListener, oldTopics); } - return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, - topicsChangeListener, oldTopics); - }).exceptionally(ex -> { - log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); - return null; - }).thenAccept(__ -> { - // schedule the next re-check task - this.recheckPatternTimeout = client.timer() - .newTimeout(PatternMultiTopicsConsumerImpl.this, - Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); }); } @@ -167,14 +223,26 @@ void setTopicsHash(String topicsHash) { } interface TopicsChangedListener { - // unsubscribe and delete ConsumerImpl in the `consumers` map in `MultiTopicsConsumerImpl` based on added topics + /*** + * unsubscribe and delete {@link ConsumerImpl} in the {@link 
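Editor's note on the epoch counter introduced above: when several rechecks overlap, only the most recently started one is allowed to update the subscriptions; older lookup results are dropped inside the synchronized block. A self-contained sketch of that guard with generic names:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;

public class EpochGuardSketch<T> {
    private final AtomicInteger epoch = new AtomicInteger();
    private final Object lock = new Object();

    public CompletableFuture<Void> recheck(Supplier<CompletableFuture<T>> lookup, Consumer<T> applyResult) {
        final int myEpoch = epoch.incrementAndGet();
        return lookup.get().thenAccept(result -> {
            synchronized (lock) {
                if (epoch.get() > myEpoch) {
                    return;                    // a newer recheck started; discard this stale result
                }
                applyResult.accept(result);
            }
        });
    }
}
```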
MultiTopicsConsumerImpl#consumers} map in + * {@link MultiTopicsConsumerImpl}. + * @param removedTopics topic names removed(contains the partition suffix). + */ CompletableFuture onTopicsRemoved(Collection removedTopics); - // subscribe and create a list of new ConsumerImpl, added them to the `consumers` map in - // `MultiTopicsConsumerImpl`. + + /*** + * subscribe and create a list of new {@link ConsumerImpl}, added them to the + * {@link MultiTopicsConsumerImpl#consumers} map in {@link MultiTopicsConsumerImpl}. + * @param addedTopics topic names added(contains the partition suffix). + */ CompletableFuture onTopicsAdded(Collection addedTopics); } private class PatternTopicsChangedListener implements TopicsChangedListener { + + /** + * {@inheritDoc} + */ @Override public CompletableFuture onTopicsRemoved(Collection removedTopics) { CompletableFuture removeFuture = new CompletableFuture<>(); @@ -196,6 +264,9 @@ public CompletableFuture onTopicsRemoved(Collection removedTopics) return removeFuture; } + /** + * {@inheritDoc} + */ @Override public CompletableFuture onTopicsAdded(Collection addedTopics) { CompletableFuture addFuture = new CompletableFuture<>(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java index 9086ccc4ef0e0..0cf776aea5942 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java @@ -124,7 +124,18 @@ public boolean isDuplicate(MessageId messageId) { // Already included in a cumulative ack return true; } else { - return pendingIndividualAcks.contains(MessageIdAdvUtils.discardBatch(messageIdAdv)); + // If "batchIndexAckEnabled" is false, the batched messages acknowledgment will be traced by + // pendingIndividualAcks. So no matter what type the message ID is, check with "pendingIndividualAcks" + // first. 
+ MessageIdAdv key = MessageIdAdvUtils.discardBatch(messageIdAdv); + if (pendingIndividualAcks.contains(key)) { + return true; + } + if (messageIdAdv.getBatchIndex() >= 0) { + ConcurrentBitSetRecyclable bitSet = pendingIndividualBatchIndexAcks.get(key); + return bitSet != null && !bitSet.get(messageIdAdv.getBatchIndex()); + } + return false; } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java index 2192ebfb64e75..8d2d917d7b880 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java @@ -695,6 +695,12 @@ private void serializeAndSendMessage(MessageImpl msg, op = OpSendMsg.create(msg, null, sequenceId, callback); final MessageMetadata finalMsgMetadata = msgMetadata; op.rePopulate = () -> { + if (msgMetadata.hasChunkId()) { + // The message metadata is shared between all chunks in a large message + // We need to reset the chunk id for each call of this method + // It's safe to do that because there is only 1 thread to manipulate this message metadata + finalMsgMetadata.setChunkId(chunkId); + } op.cmd = sendMessage(producerId, sequenceId, numMessages, messageId, finalMsgMetadata, encryptedPayload); }; @@ -1649,8 +1655,9 @@ public Iterator iterator() { } } + @Override - public void connectionOpened(final ClientCnx cnx) { + public CompletableFuture connectionOpened(final ClientCnx cnx) { previousExceptions.clear(); chunkMaxMessageSize = Math.min(chunkMaxMessageSize, ClientCnx.getMaxMessageSize()); @@ -1659,7 +1666,7 @@ public void connectionOpened(final ClientCnx cnx) { // Because the state could have been updated while retrieving the connection, we set it back to connecting, // as long as the change from current state to connecting is a valid state change. if (!changeToConnecting()) { - return; + return CompletableFuture.completedFuture(null); } // We set the cnx reference before registering the producer on the cnx, so if the cnx breaks before creating // the producer, it will try to grab a new cnx. We also increment and get the epoch value for the producer. @@ -1699,6 +1706,7 @@ public void connectionOpened(final ClientCnx cnx) { } } + final CompletableFuture future = new CompletableFuture<>(); cnx.sendRequestWithId( Commands.newProducer(topic, producerId, requestId, producerName, conf.isEncryptionEnabled(), metadata, schemaInfo, epoch, userProvidedProducerName, @@ -1713,11 +1721,13 @@ public void connectionOpened(final ClientCnx cnx) { // We are now reconnected to broker and clear to send messages. 
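Editor's note on the isDuplicate change above: the tracker now consults the plain (ledger, entry) key first and only then falls back to the per-batch bit set, where a cleared bit means that batch index was already acknowledged. A reduced sketch of that lookup order using a plain map and BitSet (the real tracker uses recyclable concurrent variants):

```java
import java.util.BitSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class DuplicateAckCheckSketch {
    record EntryKey(long ledgerId, long entryId) {}

    private final Set<EntryKey> pendingIndividualAcks = new HashSet<>();
    // Bits start set for every index in the batch and are cleared as indexes get acked.
    private final Map<EntryKey, BitSet> pendingBatchIndexAcks = new HashMap<>();

    boolean isDuplicate(EntryKey key, int batchIndex) {
        if (pendingIndividualAcks.contains(key)) {
            return true;                                    // whole entry already queued for ack
        }
        if (batchIndex >= 0) {
            BitSet bits = pendingBatchIndexAcks.get(key);
            return bits != null && !bits.get(batchIndex);   // cleared bit => already acked
        }
        return false;
    }
}
```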
Re-send all pending messages and // set the cnx pointer so that new messages will be sent immediately synchronized (ProducerImpl.this) { - if (getState() == State.Closing || getState() == State.Closed) { + State state = getState(); + if (state == State.Closing || state == State.Closed) { // Producer was closed while reconnecting, close the connection to make sure the broker // drops the producer on its side cnx.removeProducer(producerId); cnx.channel().close(); + future.complete(null); return; } resetBackoff(); @@ -1744,13 +1754,16 @@ public void connectionOpened(final ClientCnx cnx) { resendMessages(cnx, epoch); } + future.complete(null); }).exceptionally((e) -> { Throwable cause = e.getCause(); cnx.removeProducer(producerId); - if (getState() == State.Closing || getState() == State.Closed) { + State state = getState(); + if (state == State.Closing || state == State.Closed) { // Producer was closed while reconnecting, close the connection to make sure the broker // drops the producer on its side cnx.channel().close(); + future.complete(null); return null; } @@ -1779,6 +1792,7 @@ public void connectionOpened(final ClientCnx cnx) { } producerCreatedFuture.completeExceptionally(cause); }); + future.complete(null); return null; } if (cause instanceof PulsarClientException.ProducerBlockedQuotaExceededException) { @@ -1822,7 +1836,7 @@ public void connectionOpened(final ClientCnx cnx) { && System.currentTimeMillis() < PRODUCER_DEADLINE_UPDATER.get(ProducerImpl.this))) { // Either we had already created the producer once (producerCreatedFuture.isDone()) or we are // still within the initial timeout budget and we are dealing with a retriable error - reconnectLater(cause); + future.completeExceptionally(cause); } else { setState(State.Failed); producerCreatedFuture.completeExceptionally(cause); @@ -1834,9 +1848,12 @@ public void connectionOpened(final ClientCnx cnx) { sendTimeout = null; } } - + if (!future.isDone()) { + future.complete(null); + } return null; }); + return future; } @Override @@ -1848,7 +1865,7 @@ public void connectionFailed(PulsarClientException exception) { if (producerCreatedFuture.completeExceptionally(exception)) { if (nonRetriableError) { log.info("[{}] Producer creation failed for producer {} with unretriableError = {}", - topic, producerId, exception); + topic, producerId, exception.getMessage()); } else { log.info("[{}] Producer creation failed for producer {} after producerTimeout", topic, producerId); } @@ -1962,6 +1979,11 @@ String getHandlerName() { return producerName; } + @VisibleForTesting + void triggerSendTimer() throws Exception { + run(sendTimeout); + } + /** * Process sendTimeout events. */ @@ -1980,7 +2002,8 @@ public void run(Timeout timeout) throws Exception { } OpSendMsg firstMsg = pendingMessages.peek(); - if (firstMsg == null && (batchMessageContainer == null || batchMessageContainer.isEmpty())) { + if (firstMsg == null && (batchMessageContainer == null || batchMessageContainer.isEmpty() + || batchMessageContainer.getFirstAddedTimestamp() == 0L)) { // If there are no pending messages, reset the timeout to the configured value. timeToWaitMs = conf.getSendTimeoutMs(); } else { @@ -1990,7 +2013,7 @@ public void run(Timeout timeout) throws Exception { } else { // Because we don't flush batch messages while disconnected, we consider them "createdAt" when // they would have otherwise been flushed. 
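Editor's note on a pattern that recurs through this diff (ProducerImpl here, TopicListWatcher and TransactionMetaStoreHandler further down): connectionOpened now returns a CompletableFuture instead of calling reconnectLater itself, and completing that future exceptionally is what tells the connection handler to schedule the retry. A minimal sketch of that contract; all names here are hypothetical stand-ins, not the real ConnectionHandler API:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ConnectionOpenedSketch {
    interface Connection {}

    // Components report the outcome of their (re)registration through the returned future.
    interface ReconnectingComponent {
        CompletableFuture<Void> connectionOpened(Connection cnx);
    }

    static class ConnectionHandlerSketch {
        private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
        private final ReconnectingComponent component;
        private final Runnable grabCnx;   // re-acquires a connection and drives another attempt

        ConnectionHandlerSketch(ReconnectingComponent component, Runnable grabCnx) {
            this.component = component;
            this.grabCnx = grabCnx;
        }

        void onConnectionEstablished(Connection cnx) {
            component.connectionOpened(cnx).exceptionally(error -> {
                // The component no longer calls reconnectLater(error) itself; a failed future
                // is the signal for the handler to schedule the next attempt.
                timer.schedule(grabCnx, 100, TimeUnit.MILLISECONDS);
                return null;
            });
        }
    }
}
```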
- createdAt = lastBatchSendNanoTime + createdAt = batchMessageContainer.getFirstAddedTimestamp() + TimeUnit.MICROSECONDS.toNanos(conf.getBatchingMaxPublishDelayMicros()); } // If there is at least one message, calculate the diff between the message timeout and the elapsed @@ -2364,10 +2387,6 @@ void setClientCnx(ClientCnx clientCnx) { this.connectionHandler.setClientCnx(clientCnx); } - void reconnectLater(Throwable exception) { - this.connectionHandler.reconnectLater(exception); - } - void grabCnx() { this.connectionHandler.grabCnx(); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java index 6c749a8cf4354..fdabb5fa8cfa5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java @@ -944,10 +944,20 @@ public void updateTlsTrustStorePathAndPassword(String tlsTrustStorePath, String conf.setTlsTrustStorePassword(tlsTrustStorePassword); } + public CompletableFuture getConnection(final String topic, int randomKeyForSelectConnection) { + TopicName topicName = TopicName.get(topic); + return lookup.getBroker(topicName) + .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(), randomKeyForSelectConnection)); + } + + /** + * Only for test. + */ + @VisibleForTesting public CompletableFuture getConnection(final String topic) { TopicName topicName = TopicName.get(topic); return lookup.getBroker(topicName) - .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight())); + .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(), cnxPool.genRandomKeyToSelectCon())); } public CompletableFuture getConnectionToServiceUrl() { @@ -956,12 +966,13 @@ public CompletableFuture getConnectionToServiceUrl() { "Can't get client connection to HTTP service URL", null)); } InetSocketAddress address = lookup.resolveHost(); - return getConnection(address, address); + return getConnection(address, address, cnxPool.genRandomKeyToSelectCon()); } public CompletableFuture getConnection(final InetSocketAddress logicalAddress, - final InetSocketAddress physicalAddress) { - return cnxPool.getConnection(logicalAddress, physicalAddress); + final InetSocketAddress physicalAddress, + final int randomKeyForSelectConnection) { + return cnxPool.getConnection(logicalAddress, physicalAddress, randomKeyForSelectConnection); } /** visible for pulsar-functions. 
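Editor's note on the getConnection changes above: the caller now supplies one random key up front, so the lookup and the eventual connection for a given consumer or producer map to the same pooled slot instead of picking a fresh random slot on every call. A tiny model of keying into a fixed-size pool that way; names are illustrative and the real pool also keys on the broker address:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Supplier;

public class ConnectionPoolSketch<C> {
    private final int maxConnectionsPerHost;
    private final ConcurrentHashMap<Integer, CompletableFuture<C>> pool = new ConcurrentHashMap<>();
    private final Supplier<CompletableFuture<C>> connector;

    public ConnectionPoolSketch(int maxConnectionsPerHost, Supplier<CompletableFuture<C>> connector) {
        this.maxConnectionsPerHost = maxConnectionsPerHost;
        this.connector = connector;
    }

    // The caller generates one key and reuses it for every related getConnection call,
    // so all of them land on the same pooled channel.
    public int genRandomKeyToSelectCon() {
        return ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE);
    }

    public CompletableFuture<C> getConnection(int randomKey) {
        int slot = Math.floorMod(randomKey, maxConnectionsPerHost);
        return pool.computeIfAbsent(slot, ignored -> connector.get());
    }
}
```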
**/ diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java index 1b069c5172dd7..346eb20ef4cc5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.client.impl; - import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.Charset; @@ -35,9 +34,11 @@ import org.apache.pulsar.client.api.BatcherBuilder; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.MessagePayloadFactory; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.TopicMessageId; import org.apache.pulsar.client.api.schema.GenericRecord; import org.apache.pulsar.client.api.schema.GenericSchema; import org.apache.pulsar.client.api.schema.RecordSchemaBuilder; @@ -387,4 +388,19 @@ public SchemaInfo newSchemaInfoImpl(String name, byte[] schema, SchemaType type, Map propertiesValue) { return new SchemaInfoImpl(name, schema, type, timestamp, propertiesValue); } + + @Override + public TopicMessageId newTopicMessageId(String topic, MessageId messageId) { + final MessageIdAdv messageIdAdv; + if (messageId instanceof MessageIdAdv) { + messageIdAdv = (MessageIdAdv) messageId; + } else { + try { + messageIdAdv = (MessageIdAdv) MessageId.fromByteArray(messageId.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return new TopicMessageIdImpl(topic, messageIdAdv); + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java index ca2011cf18a42..bed0f49f16068 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java @@ -267,6 +267,7 @@ public ReaderBuilder autoAckOldestChunkedMessageOnQueueFull(boolean autoAckOl @Override public ReaderBuilder expireTimeOfIncompleteChunkedMessage(long duration, TimeUnit unit) { + checkArgument(duration >= 0, "The expired time must not be negative."); conf.setExpireTimeOfIncompleteChunkedMessageMillis(unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java index ff5b251ad55ff..d46f7fa140887 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl; +import static org.apache.pulsar.common.topics.TopicCompactionStrategy.TABLE_VIEW_TAG; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -65,7 +66,8 @@ public class TableViewImpl implements TableView { this.immutableData = Collections.unmodifiableMap(data); this.listeners = new ArrayList<>(); this.listenersMutex = new ReentrantLock(); - this.compactionStrategy = 
TopicCompactionStrategy.load(conf.getTopicCompactionStrategyClassName()); + this.compactionStrategy = + TopicCompactionStrategy.load(TABLE_VIEW_TAG, conf.getTopicCompactionStrategyClassName()); ReaderBuilder readerBuilder = client.newReader(schema) .topic(conf.getTopicName()) .startMessageId(MessageId.earliest) @@ -192,6 +194,14 @@ private void handleMessage(Message msg) { if (compactionStrategy != null) { T prev = data.get(key); update = !compactionStrategy.shouldKeepLeft(prev, cur); + if (!update) { + log.info("Skipped the message from topic {}. key={} value={} prev={}", + conf.getTopicName(), + key, + cur, + prev); + compactionStrategy.handleSkippedMessage(key, cur); + } } if (update) { @@ -274,8 +284,14 @@ private void readTailMessages(Reader reader) { log.error("Reader {} was closed while reading tail messages.", reader.getTopic(), ex); } else { + // Retrying on the other exceptions such as NotConnectedException + try { + Thread.sleep(50); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } log.warn("Reader {} was interrupted while reading tail messages. " - + "Retrying..", reader.getTopic(), ex); + + "Retrying..", reader.getTopic(), ex); readTailMessages(reader); } return null; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java index 384d1b688b8d5..86adf69f06e0f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java @@ -56,12 +56,18 @@ public class TopicListWatcher extends HandlerState implements ConnectionHandler. private final List previousExceptions = new CopyOnWriteArrayList<>(); private final AtomicReference clientCnxUsedForWatcherRegistration = new AtomicReference<>(); + private final Runnable recheckTopicsChangeAfterReconnect; + + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). 
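Editor's note on the readTailMessages change above: a short sleep is inserted before re-issuing the read so transient errors (for example a NotConnectedException) do not retry in a tight loop, and the interrupt flag is restored if the sleep is interrupted. A bare-bones version of that retry shape, with `readNext` standing in for the reader's async read:

```java
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;
import java.util.function.Supplier;

public class RetryingTailReaderSketch<T> {
    private final Supplier<CompletableFuture<T>> readNext;   // stands in for reader.readNextAsync()
    private final Consumer<T> handle;

    public RetryingTailReaderSketch(Supplier<CompletableFuture<T>> readNext, Consumer<T> handle) {
        this.readNext = readNext;
        this.handle = handle;
    }

    public void readTail() {
        readNext.get().thenAccept(msg -> {
            handle.accept(msg);
            readTail();                        // keep tailing on success
        }).exceptionally(error -> {
            try {
                Thread.sleep(50);              // brief pause before retrying a transient failure
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            readTail();
            return null;
        });
    }
}
```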
+ */ public TopicListWatcher(PatternMultiTopicsConsumerImpl.TopicsChangedListener topicsChangeListener, PulsarClientImpl client, Pattern topicsPattern, long watcherId, NamespaceName namespace, String topicsHash, - CompletableFuture watcherFuture) { - super(client, null); + CompletableFuture watcherFuture, + Runnable recheckTopicsChangeAfterReconnect) { + super(client, topicsPattern.pattern()); this.topicsChangeListener = topicsChangeListener; this.name = "Watcher(" + topicsPattern + ")"; this.connectionHandler = new ConnectionHandler(this, @@ -77,6 +83,7 @@ public TopicListWatcher(PatternMultiTopicsConsumerImpl.TopicsChangedListener top this.namespace = namespace; this.topicsHash = topicsHash; this.watcherFuture = watcherFuture; + this.recheckTopicsChangeAfterReconnect = recheckTopicsChangeAfterReconnect; connectionHandler.grabCnx(); } @@ -89,7 +96,7 @@ public void connectionFailed(PulsarClientException exception) { if (watcherFuture.completeExceptionally(exception)) { setState(State.Failed); log.info("[{}] Watcher creation failed for {} with non-retriable error {}", - topic, name, exception); + topic, name, exception.getMessage()); deregisterFromClientCnx(); } } else { @@ -98,13 +105,14 @@ public void connectionFailed(PulsarClientException exception) { } @Override - public void connectionOpened(ClientCnx cnx) { + public CompletableFuture connectionOpened(ClientCnx cnx) { previousExceptions.clear(); - if (getState() == State.Closing || getState() == State.Closed) { + State state = getState(); + if (state == State.Closing || state == State.Closed) { setState(State.Closed); deregisterFromClientCnx(); - return; + return CompletableFuture.completedFuture(null); } log.info("[{}][{}] Creating topic list watcher on cnx {}, watcherId {}", @@ -116,6 +124,7 @@ public void connectionOpened(ClientCnx cnx) { .compareAndSet(this, 0L, System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs()); + final CompletableFuture future = new CompletableFuture<>(); // synchronized this, because redeliverUnAckMessage eliminate the epoch inconsistency between them synchronized (this) { setClientCnx(cnx); @@ -132,20 +141,22 @@ public void connectionOpened(ClientCnx cnx) { setState(State.Closed); deregisterFromClientCnx(); cnx.channel().close(); + future.complete(null); return; } } - this.connectionHandler.resetBackoff(); + recheckTopicsChangeAfterReconnect.run(); watcherFuture.complete(this); - + future.complete(null); }).exceptionally((e) -> { deregisterFromClientCnx(); if (getState() == State.Closing || getState() == State.Closed) { // Watcher was closed while reconnecting, close the connection to make sure the broker // drops the watcher on its side cnx.channel().close(); + future.complete(null); return null; } log.warn("[{}][{}] Failed to create topic list watcher on {}", @@ -155,7 +166,7 @@ public void connectionOpened(ClientCnx cnx) { && PulsarClientException.isRetriableError(e.getCause()) && System.currentTimeMillis() < CREATE_WATCHER_DEADLINE_UPDATER.get(TopicListWatcher.this)) { - reconnectLater(e.getCause()); + future.completeExceptionally(e.getCause()); } else if (!watcherFuture.isDone()) { // unable to create new watcher, fail operation setState(State.Failed); @@ -164,11 +175,15 @@ public void connectionOpened(ClientCnx cnx) { + "when connecting to the broker", getHandlerName()))); } else { // watcher was subscribed and connected, but we got some error, keep trying - reconnectLater(e.getCause()); + future.completeExceptionally(e.getCause()); + } + if (!future.isDone()) { + 
future.complete(null); } return null; }); } + return future; } @Override @@ -249,11 +264,6 @@ void deregisterFromClientCnx() { setClientCnx(null); } - void reconnectLater(Throwable exception) { - this.connectionHandler.reconnectLater(exception); - } - - private void cleanupAtClose(CompletableFuture closeFuture, Throwable exception) { log.info("[{}] Closed topic list watcher", getHandlerName()); setState(State.Closed); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageIdImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageIdImpl.java index 189dc1c608379..3dc9b23e93e86 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageIdImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageIdImpl.java @@ -18,15 +18,27 @@ */ package org.apache.pulsar.client.impl; +import java.util.BitSet; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.TopicMessageId; -public class TopicMessageIdImpl extends TopicMessageId.Impl { +public class TopicMessageIdImpl implements MessageIdAdv, TopicMessageId { - private final String topicName; + private final String ownerTopic; + private final MessageIdAdv msgId; + private final String topicName; // it's never used + public TopicMessageIdImpl(String topic, MessageIdAdv msgId) { + this.ownerTopic = topic; + this.msgId = msgId; + this.topicName = ""; + } + + @Deprecated public TopicMessageIdImpl(String topicPartitionName, String topicName, MessageId messageId) { - super(topicPartitionName, messageId); + this.msgId = (MessageIdAdv) messageId; + this.ownerTopic = topicPartitionName; this.topicName = topicName; } @@ -50,16 +62,71 @@ public String getTopicPartitionName() { @Deprecated public MessageId getInnerMessageId() { - return new MessageIdImpl(getLedgerId(), getEntryId(), getPartitionIndex()); + return msgId; } @Override public boolean equals(Object obj) { - return super.equals(obj); + return msgId.equals(obj); } @Override public int hashCode() { - return super.hashCode(); + return msgId.hashCode(); + } + + @Override + public int compareTo(MessageId o) { + return msgId.compareTo(o); + } + + @Override + public byte[] toByteArray() { + return msgId.toByteArray(); + } + + @Override + public String getOwnerTopic() { + return ownerTopic; + } + + @Override + public long getLedgerId() { + return msgId.getLedgerId(); + } + + @Override + public long getEntryId() { + return msgId.getEntryId(); + } + + @Override + public int getPartitionIndex() { + return msgId.getPartitionIndex(); + } + + @Override + public int getBatchIndex() { + return msgId.getBatchIndex(); + } + + @Override + public int getBatchSize() { + return msgId.getBatchSize(); + } + + @Override + public BitSet getAckSet() { + return msgId.getAckSet(); + } + + @Override + public MessageIdAdv getFirstChunkMessageId() { + return msgId.getFirstChunkMessageId(); + } + + @Override + public String toString() { + return msgId.toString(); } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java index d24ecbd6aa917..1fec08a43f137 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java @@ -22,6 +22,7 @@ import java.util.Optional; import org.apache.pulsar.client.api.Message; import 
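Editor's note on TopicMessageIdImpl above: it no longer extends TopicMessageId.Impl but wraps a MessageIdAdv, forwarding every accessor to the wrapped id and only adding the owner topic. The delegation pattern in isolation, with a deliberately cut-down interface (the real MessageIdAdv exposes more methods):

```java
public class DelegatingTopicMessageIdSketch {
    // Cut-down stand-in for MessageIdAdv.
    interface MsgId extends Comparable<MsgId> {
        long getLedgerId();
        long getEntryId();
    }

    // Wrapper that adds the owner topic and delegates everything else unchanged.
    static final class TopicMsgId implements MsgId {
        private final String ownerTopic;
        private final MsgId inner;

        TopicMsgId(String ownerTopic, MsgId inner) {
            this.ownerTopic = ownerTopic;
            this.inner = inner;
        }

        String getOwnerTopic() { return ownerTopic; }
        @Override public long getLedgerId() { return inner.getLedgerId(); }
        @Override public long getEntryId() { return inner.getEntryId(); }
        @Override public int compareTo(MsgId o) { return inner.compareTo(o); }
        @Override public int hashCode() { return inner.hashCode(); }
        @Override public boolean equals(Object o) { return inner.equals(o); }
        @Override public String toString() { return inner.toString(); }
    }
}
```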
org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.api.EncryptionContext; @@ -42,11 +43,11 @@ public class TopicMessageImpl implements Message { this.receivedByconsumer = receivedByConsumer; this.msg = msg; - this.messageId = new TopicMessageIdImpl(topicPartitionName, topicPartitionName, msg.getMessageId()); + this.messageId = new TopicMessageIdImpl(topicPartitionName, (MessageIdAdv) msg.getMessageId()); } /** - * Get the topic name without partition part of this message. + * Get the topic name with partition part of this message. * @return the name of the topic on which this message was published */ @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java index 601fa2b8f815a..bf5c1f556de8c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl; +import static com.google.common.base.Preconditions.checkArgument; import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; @@ -123,14 +124,17 @@ public void connectionFailed(PulsarClientException exception) { } @Override - public void connectionOpened(ClientCnx cnx) { + public CompletableFuture connectionOpened(ClientCnx cnx) { + final CompletableFuture future = new CompletableFuture<>(); internalPinnedExecutor.execute(() -> { LOG.info("Transaction meta handler with transaction coordinator id {} connection opened.", transactionCoordinatorId); - if (getState() == State.Closing || getState() == State.Closed) { + State state = getState(); + if (state == State.Closing || state == State.Closed) { setState(State.Closed); failPendingRequest(); + future.complete(null); return; } @@ -146,6 +150,7 @@ public void connectionOpened(ClientCnx cnx) { this.connectionHandler.resetBackoff(); pendingRequests.forEach((requestID, opBase) -> checkStateAndSendRequest(opBase)); } + future.complete(null); }); }).exceptionally((e) -> { internalPinnedExecutor.execute(() -> { @@ -155,16 +160,19 @@ public void connectionOpened(ClientCnx cnx) { || e.getCause() instanceof PulsarClientException.NotAllowedException) { setState(State.Closed); cnx.channel().close(); + future.complete(null); } else { - connectionHandler.reconnectLater(e.getCause()); + future.completeExceptionally(e.getCause()); } }); return null; }); } else { registerToConnection(cnx); + future.complete(null); } }); + return future; } private boolean registerToConnection(ClientCnx cnx) { @@ -196,6 +204,7 @@ private void failPendingRequest() { } public CompletableFuture newTransactionAsync(long timeout, TimeUnit unit) { + checkArgument(timeout >= 0, "The timeout must not be negative."); if (LOG.isDebugEnabled()) { LOG.debug("New transaction with timeout in ms {}", unit.toMillis(timeout)); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java index 026f8a1e69e0b..895949fdf32cc 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java +++ 
b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java @@ -212,6 +212,7 @@ public TypedMessageBuilder disableReplication() { @Override public TypedMessageBuilder deliverAfter(long delay, TimeUnit unit) { + checkArgument(delay >= 0, "The delay time must not be negative."); return deliverAt(System.currentTimeMillis() + unit.toMillis(delay)); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java index 534f33350267d..220771e426fec 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java @@ -138,8 +138,11 @@ public void run(Timeout t) throws Exception { if (!headPartition.isEmpty()) { log.info("[{}] {} messages will be re-delivered", consumerBase, headPartition.size()); headPartition.forEach(messageId -> { - addChunkedMessageIdsAndRemoveFromSequenceMap(messageId, messageIds, consumerBase); - messageIds.add(messageId); + if (messageId instanceof ChunkMessageIdImpl) { + addChunkedMessageIdsAndRemoveFromSequenceMap(messageId, messageIds, consumerBase); + } else { + messageIds.add(messageId); + } messageIdPartitionMap.remove(messageId); }); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java index 8760926792cd7..3ae0e977d13c4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java @@ -65,7 +65,7 @@ public class ConsumerConfigurationData implements Serializable, Cloneable { @ApiModelProperty( name = "topicsPattern", - value = "Topic pattern" + value = "The regexp for the topic name(not contains partition suffix)." 
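Editor's note: several setters in this diff (deliverAfter, expireTimeOfIncompleteChunkedMessage, checkInterval, withTransactionTimeout, newTransactionAsync) gain the same guard, rejecting negative durations up front with Guava's checkArgument before converting to milliseconds. For reference, the shape of that validation in a standalone builder-style setter:

```java
import static com.google.common.base.Preconditions.checkArgument;

import java.util.concurrent.TimeUnit;

public class NonNegativeDurationSketch {
    private long delayMillis;

    // Mirrors the new setter guards: fail fast on negative input, then convert to millis.
    public NonNegativeDurationSketch deliverAfter(long delay, TimeUnit unit) {
        checkArgument(delay >= 0, "The delay time must not be negative.");
        this.delayMillis = unit.toMillis(delay);
        return this;
    }
}
```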
) private Pattern topicsPattern; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java index c5e9d4781c56f..255dedcbb85e9 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl.transaction; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; @@ -45,6 +46,7 @@ public TransactionBuilderImpl(PulsarClientImpl client, TransactionCoordinatorCli @Override public TransactionBuilder withTransactionTimeout(long txnTimeout, TimeUnit timeoutUnit) { + checkArgument(txnTimeout >= 0, "The txn timeout must not be negative."); this.txnTimeout = txnTimeout; this.timeUnit = timeoutUnit; return this; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java index b7e085ed82a85..d1260ba045e6d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java @@ -21,6 +21,7 @@ import com.google.common.collect.Lists; import io.netty.util.Timeout; import io.netty.util.TimerTask; +import java.util.Arrays; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -186,13 +187,14 @@ public void registerAckOp(CompletableFuture newAckFuture) { @Override public CompletableFuture commit() { timeout.cancel(); - return checkIfOpenOrCommitting().thenCompose((value) -> { + return checkState(State.OPEN, State.COMMITTING).thenCompose((value) -> { CompletableFuture commitFuture = new CompletableFuture<>(); this.state = State.COMMITTING; opFuture.whenComplete((v, e) -> { if (hasOpsFailed) { - abort().whenComplete((vx, ex) -> commitFuture.completeExceptionally(new PulsarClientException - .TransactionHasOperationFailedException())); + checkState(State.COMMITTING).thenCompose(__ -> internalAbort()).whenComplete((vx, ex) -> + commitFuture.completeExceptionally( + new PulsarClientException.TransactionHasOperationFailedException())); } else { tcClient.commitAsync(txnId) .whenComplete((vx, ex) -> { @@ -216,28 +218,30 @@ public CompletableFuture commit() { @Override public CompletableFuture abort() { timeout.cancel(); - return checkIfOpenOrAborting().thenCompose(value -> { - CompletableFuture abortFuture = new CompletableFuture<>(); - this.state = State.ABORTING; - opFuture.whenComplete((v, e) -> { - tcClient.abortAsync(txnId).whenComplete((vx, ex) -> { + return checkState(State.OPEN, State.ABORTING).thenCompose(__ -> internalAbort()); + } - if (ex != null) { - if (ex instanceof TransactionNotFoundException - || ex instanceof InvalidTxnStatusException) { - this.state = State.ERROR; - } - abortFuture.completeExceptionally(ex); - } else { - this.state = State.ABORTED; - abortFuture.complete(null); + private CompletableFuture internalAbort() { + CompletableFuture abortFuture = new CompletableFuture<>(); + this.state = State.ABORTING; + opFuture.whenComplete((v, e) -> { + 
tcClient.abortAsync(txnId).whenComplete((vx, ex) -> { + + if (ex != null) { + if (ex instanceof TransactionNotFoundException + || ex instanceof InvalidTxnStatusException) { + this.state = State.ERROR; } + abortFuture.completeExceptionally(ex); + } else { + this.state = State.ABORTED; + abortFuture.complete(null); + } - }); }); - - return abortFuture; }); + + return abortFuture; } @Override @@ -261,25 +265,15 @@ public boolean checkIfOpen(CompletableFuture completableFuture) { } } - private CompletableFuture checkIfOpenOrCommitting() { - if (state == State.OPEN || state == State.COMMITTING) { - return CompletableFuture.completedFuture(null); - } else { - return invalidTxnStatusFuture(); - } - } - - private CompletableFuture checkIfOpenOrAborting() { - if (state == State.OPEN || state == State.ABORTING) { - return CompletableFuture.completedFuture(null); - } else { - return invalidTxnStatusFuture(); + private CompletableFuture checkState(State... expectedStates) { + final State actualState = STATE_UPDATE.get(this); + for (State expectedState : expectedStates) { + if (actualState == expectedState) { + return CompletableFuture.completedFuture(null); + } } - } - - private CompletableFuture invalidTxnStatusFuture() { return FutureUtil.failedFuture(new InvalidTxnStatusException("[" + txnIdMostBits + ":" - + txnIdLeastBits + "] with unexpected state : " - + state.name() + ", expect " + State.OPEN + " state!")); + + txnIdLeastBits + "] with unexpected state: " + actualState.name() + ", expect: " + + Arrays.toString(expectedStates))); } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java index dc057ffe32daf..cf0620edf98df 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.util; +import static com.google.common.base.Preconditions.checkArgument; import java.time.Clock; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -33,6 +34,7 @@ public class ObjectCache implements Supplier { public ObjectCache(Supplier supplier, long cacheDuration, TimeUnit unit) { this(supplier, cacheDuration, unit, Clock.systemUTC()); + checkArgument(cacheDuration >= 0, "The cache duration must not be negative."); } ObjectCache(Supplier supplier, long cacheDuration, TimeUnit unit, Clock clock) { diff --git a/pulsar-client/src/main/resources/findbugsExclude.xml b/pulsar-client/src/main/resources/findbugsExclude.xml index 92ec9e934ee1e..44758c90c75de 100644 --- a/pulsar-client/src/main/resources/findbugsExclude.xml +++ b/pulsar-client/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java index 0418a54c772cc..1d1a6f85bfd41 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java +++
b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java @@ -65,6 +65,8 @@ public void setup() throws NoSuchFieldException, IllegalAccessException { ConcurrentOpenHashMap.newBuilder().build(); cnx = spy(new ClientCnxTest(new ClientConfigurationData(), eventLoopGroup)); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); doReturn(client).when(consumer).getClient(); doReturn(cnx).when(consumer).getClientCnx(); doReturn(new ConsumerStatsRecorderImpl()).when(consumer).getStats(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java index 36ffa30296bb0..63fbb239439bd 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; @@ -121,6 +122,8 @@ public void testInitialize() { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -163,6 +166,8 @@ public void testAutoClusterFailoverSwitchWithoutAuthentication() { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -217,6 +222,8 @@ public void testAutoClusterFailoverSwitchWithAuthentication() throws IOException AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -270,6 +277,8 @@ public void testAutoClusterFailoverSwitchTlsTrustStore() throws IOException { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); 
Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java index 4b80e19c256d7..abb195c9830d0 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java @@ -105,6 +105,8 @@ public void recoveryAfterOom() { final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); producerConfigurationData.setCompressionType(CompressionType.NONE); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); MemoryLimitController memoryLimitController = mock(MemoryLimitController.class); when(pulsarClient.getMemoryLimitController()).thenReturn(memoryLimitController); try { @@ -148,6 +150,8 @@ public void testMessagesSize() throws Exception { final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); producerConfigurationData.setCompressionType(CompressionType.NONE); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); MemoryLimitController memoryLimitController = mock(MemoryLimitController.class); when(pulsarClient.getMemoryLimitController()).thenReturn(memoryLimitController); try { diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java index 4db5dbe877685..ff7d7f12dd452 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java @@ -19,7 +19,9 @@ package org.apache.pulsar.client.impl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import io.netty.channel.ChannelHandlerContext; @@ -72,11 +74,16 @@ static PulsarClientImpl mockClientCnx(PulsarClientImpl clientMock) { .thenReturn(CompletableFuture.completedFuture(mock(ProducerResponse.class))); when(clientCnxMock.channel().remoteAddress()).thenReturn(mock(SocketAddress.class)); when(clientMock.getConnection(any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); - when(clientMock.getConnection(any(), any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(anyString())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(anyString(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(any(), any(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); ConnectionPool connectionPoolMock = mock(ConnectionPool.class); when(clientMock.getCnxPool()).thenReturn(connectionPoolMock); 
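// Illustrative aside, not part of the diff: the test changes in this patch repeatedly
// apply the same fixture tweak -- a mocked PulsarClientImpl must now also stub
// getCnxPool(), because the code under test fetches the client's ConnectionPool.
// The repeated lines could be factored into a small helper like this (the helper
// name and placement are hypothetical):
//
//   import static org.mockito.Mockito.mock;
//   import static org.mockito.Mockito.when;
//   import org.apache.pulsar.client.impl.ConnectionPool;
//   import org.apache.pulsar.client.impl.PulsarClientImpl;
//
//   static PulsarClientImpl newMockClientWithCnxPool() {
//       PulsarClientImpl client = mock(PulsarClientImpl.class);
//       when(client.getCnxPool()).thenReturn(mock(ConnectionPool.class));
//       return client;
//   }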
when(connectionPoolMock.getConnection(any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); - when(connectionPoolMock.getConnection(any(), any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(connectionPoolMock.getConnection(any(), any(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); return clientMock; } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java index 8dbd23f9c29c9..3fe136630462f 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java @@ -76,6 +76,8 @@ public class ConsumerBuilderImplTest { @BeforeMethod(alwaysRun = true) public void setup() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); ConsumerConfigurationData consumerConfigurationData = mock(ConsumerConfigurationData.class); when(consumerConfigurationData.getTopicsPattern()).thenReturn(Pattern.compile("\\w+")); when(consumerConfigurationData.getSubscriptionName()).thenReturn("testSubscriptionName"); @@ -104,6 +106,8 @@ public void testConsumerBuilderImpl() throws PulsarClientException { @Test(expectedExceptions = IllegalArgumentException.class) public void testConsumerBuilderImplWhenSchemaIsNull() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); ConsumerConfigurationData consumerConfigurationData = mock(ConsumerConfigurationData.class); new ConsumerBuilderImpl(client, consumerConfigurationData, null); } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java index 29d180f5f9a16..5a223d5da15c0 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -26,7 +27,9 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; - +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertTrue; +import io.netty.buffer.ByteBuf; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutorService; @@ -259,4 +262,26 @@ public void testTopicPriorityLevel() { assertThat(consumer.getPriorityLevel()).isEqualTo(1); } + + @Test(invocationTimeOut = 1000) + public void testSeekAsyncInternal() { + // given + ClientCnx cnx = mock(ClientCnx.class); + CompletableFuture clientReq = new CompletableFuture<>(); + when(cnx.sendRequestWithId(any(ByteBuf.class), anyLong())).thenReturn(clientReq); + + consumer.setClientCnx(cnx); + consumer.setState(HandlerState.State.Ready); + + // when + CompletableFuture firstResult = consumer.seekAsync(1L); + CompletableFuture secondResult = 
consumer.seekAsync(1L); + + clientReq.complete(null); + + // then + assertTrue(firstResult.isDone()); + assertTrue(secondResult.isCompletedExceptionally()); + verify(cnx, times(1)).sendRequestWithId(any(ByteBuf.class), anyLong()); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java index 570b139832806..227e0db10b724 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java @@ -31,6 +31,7 @@ import org.testng.annotations.Test; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @Test(groups = "broker-impl") public class ControlledClusterFailoverTest { @@ -88,6 +89,8 @@ public void testControlledClusterFailoverSwitch() throws IOException { ControlledClusterFailover controlledClusterFailover = Mockito.spy((ControlledClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); controlledClusterFailover.initialize(pulsarClient); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java index 78ffa247f7b6e..1aaf3f77da490 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java @@ -21,6 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -197,4 +198,39 @@ public void testStepRelease() throws Exception { assertTrue(l3.await(1, TimeUnit.SECONDS)); assertEquals(mlc.currentUsage(), 101); } + + @Test + public void testModifyMemoryFailedDueToNegativeParam() throws Exception { + MemoryLimitController mlc = new MemoryLimitController(100); + + try { + mlc.tryReserveMemory(-1); + fail("The test should fail due to calling tryReserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + try { + mlc.reserveMemory(-1); + fail("The test should fail due to calling reserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + try { + mlc.forceReserveMemory(-1); + fail("The test should fail due to calling forceReserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + try { + mlc.releaseMemory(-1); + fail("The test should fail due to calling releaseMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. 
+ } + + assertEquals(mlc.currentUsage(), 0); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageIdCompareToTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageIdCompareToTest.java index 4f0eca6ea4af8..fd81e9d5790ad 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageIdCompareToTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageIdCompareToTest.java @@ -148,15 +148,12 @@ public void testMessageIdImplCompareToTopicMessageId() { MessageIdImpl messageIdImpl = new MessageIdImpl(123L, 345L, 567); TopicMessageIdImpl topicMessageId1 = new TopicMessageIdImpl( "test-topic-partition-0", - "test-topic", new BatchMessageIdImpl(123L, 345L, 566, 789)); TopicMessageIdImpl topicMessageId2 = new TopicMessageIdImpl( "test-topic-partition-0", - "test-topic", new BatchMessageIdImpl(123L, 345L, 567, 789)); TopicMessageIdImpl topicMessageId3 = new TopicMessageIdImpl( "test-topic-partition-0", - "test-topic", new BatchMessageIdImpl(messageIdImpl)); assertTrue(messageIdImpl.compareTo(topicMessageId1) > 0, "Expected to be greater than"); assertTrue(messageIdImpl.compareTo(topicMessageId2) < 0, "Expected to be less than"); @@ -173,11 +170,9 @@ public void testBatchMessageIdImplCompareToTopicMessageId() { BatchMessageIdImpl messageIdImpl3 = new BatchMessageIdImpl(123L, 345L, 567, -1); TopicMessageIdImpl topicMessageId1 = new TopicMessageIdImpl( "test-topic-partition-0", - "test-topic", new MessageIdImpl(123L, 345L, 566)); TopicMessageIdImpl topicMessageId2 = new TopicMessageIdImpl( "test-topic-partition-0", - "test-topic", new MessageIdImpl(123L, 345L, 567)); assertTrue(messageIdImpl1.compareTo(topicMessageId1) > 0, "Expected to be greater than"); assertTrue(messageIdImpl1.compareTo(topicMessageId2) > 0, "Expected to be greater than"); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java index 223881d85a87b..2bd18f69386f1 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java @@ -70,6 +70,8 @@ public class PartitionedProducerImplTest { @BeforeMethod(alwaysRun = true) public void setup() { client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); schema = mock(Schema.class); producerInterceptors = mock(ProducerInterceptors.class); producerCreatedFuture = new CompletableFuture<>(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java index bb3e3fc3accf6..b830d375303bb 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java @@ -52,6 +52,8 @@ public class ProducerBuilderImplTest { public void setup() { Producer producer = mock(Producer.class); client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); producerBuilderImpl = new ProducerBuilderImpl<>(client, Schema.BYTES); when(client.newProducer()).thenReturn(producerBuilderImpl); diff --git 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java index 32d0eff6e792e..27e2dcb37cee0 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java @@ -40,6 +40,8 @@ public void testIncrementNumAcksReceived() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); conf.setStatsIntervalSeconds(1); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getConfiguration()).thenReturn(conf); Timer timer = new HashedWheelTimer(); when(client.timer()).thenReturn(timer); @@ -60,6 +62,8 @@ public void testGetStatsAndCancelStatsTimeoutWithoutArriveUpdateInterval() { ClientConfigurationData conf = new ClientConfigurationData(); conf.setStatsIntervalSeconds(60); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getConfiguration()).thenReturn(conf); Timer timer = new HashedWheelTimer(); when(client.timer()).thenReturn(timer); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java index 54d13538d7867..e0b25db891247 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.mock; @@ -122,7 +123,7 @@ public void testConsumerIsClosed() throws Exception { when(cnx.ctx()).thenReturn(ctx); when(cnx.sendRequestWithId(any(ByteBuf.class), anyLong())) .thenReturn(CompletableFuture.completedFuture(mock(ProducerResponse.class))); - when(pool.getConnection(any(InetSocketAddress.class), any(InetSocketAddress.class))) + when(pool.getConnection(any(InetSocketAddress.class), any(InetSocketAddress.class), anyInt())) .thenReturn(CompletableFuture.completedFuture(cnx)); ClientConfigurationData conf = new ClientConfigurationData(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java index 9959a2038555c..eee8ba4e8f41a 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java @@ -52,6 +52,8 @@ public void setup() { Reader reader = mock(Reader.class); when(reader.readNextAsync()).thenReturn(CompletableFuture.allOf()); client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.newReader(any(Schema.class))) .thenReturn(new ReaderBuilderImpl(client, Schema.BYTES)); when(client.createReaderAsync(any(ReaderConfigurationData.class), 
any(Schema.class))) diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java index 68c886bc7211a..6a866034ddbf8 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java @@ -38,6 +38,8 @@ public class TableViewImplTest { @BeforeClass(alwaysRun = true) public void setup() { client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.newReader(any(Schema.class))) .thenReturn(new ReaderBuilderImpl(client, Schema.BYTES)); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java index 0d24d76023c87..4fa4284109d2e 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.client.impl; import io.netty.channel.ChannelHandlerContext; +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timer; import org.apache.pulsar.client.impl.PatternMultiTopicsConsumerImpl.TopicsChangedListener; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.api.proto.BaseCommand; @@ -26,7 +28,9 @@ import org.apache.pulsar.common.api.proto.CommandWatchTopicUpdate; import org.apache.pulsar.common.naming.NamespaceName; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -50,18 +54,28 @@ public class TopicListWatcherTest { public void setup() { listener = mock(TopicsChangedListener.class); client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); + when(connectionPool.genRandomKeyToSelectCon()).thenReturn(0); when(client.getConfiguration()).thenReturn(new ClientConfigurationData()); clientCnxFuture = new CompletableFuture<>(); when(client.getConnectionToServiceUrl()).thenReturn(clientCnxFuture); + Timer timer = new HashedWheelTimer(); + when(client.timer()).thenReturn(timer); + String topic = "persistent://tenant/ns/topic\\d+"; + when(client.getConnection(topic)).thenReturn(clientCnxFuture); + when(client.getConnection(topic, 0)).thenReturn(clientCnxFuture); + when(client.getConnection(any(), any(), anyInt())).thenReturn(clientCnxFuture); + when(connectionPool.getConnection(any(), any(), anyInt())).thenReturn(clientCnxFuture); watcherFuture = new CompletableFuture<>(); watcher = new TopicListWatcher(listener, client, - Pattern.compile("persistent://tenant/ns/topic\\d+"), 7, - NamespaceName.get("tenant/ns"), null, watcherFuture); + Pattern.compile(topic), 7, + NamespaceName.get("tenant/ns"), null, watcherFuture, () -> {}); } @Test public void testWatcherGrabsConnection() { - verify(client).getConnectionToServiceUrl(); + verify(client).getConnection(anyString(), anyInt()); } @Test diff --git 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicMessageIdImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicMessageIdImplTest.java index d2e2ce9c15c54..1ddd47af91dff 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicMessageIdImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicMessageIdImplTest.java @@ -20,6 +20,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertSame; import org.testng.annotations.Test; @@ -28,9 +29,9 @@ public class TopicMessageIdImplTest { public void hashCodeTest() { MessageIdImpl msgId1 = new MessageIdImpl(0, 0, 0); MessageIdImpl msgId2 = new BatchMessageIdImpl(1, 1, 1, 1); - TopicMessageIdImpl topicMsgId1 = new TopicMessageIdImpl("topic-partition-1", "topic", msgId1); - TopicMessageIdImpl topic2MsgId1 = new TopicMessageIdImpl("topic2-partition-1", "topic2", msgId1); - TopicMessageIdImpl topicMsgId2 = new TopicMessageIdImpl("topic-partition-2", "topic", msgId2); + TopicMessageIdImpl topicMsgId1 = new TopicMessageIdImpl("topic-partition-1", msgId1); + TopicMessageIdImpl topic2MsgId1 = new TopicMessageIdImpl("topic2-partition-1", msgId1); + TopicMessageIdImpl topicMsgId2 = new TopicMessageIdImpl("topic-partition-2", msgId2); assertEquals(topicMsgId1.hashCode(), topicMsgId1.hashCode()); assertEquals(topic2MsgId1.hashCode(), topic2MsgId1.hashCode()); @@ -43,9 +44,9 @@ public void hashCodeTest() { public void equalsTest() { MessageIdImpl msgId1 = new MessageIdImpl(0, 0, 0); MessageIdImpl msgId2 = new BatchMessageIdImpl(1, 1, 1, 1); - TopicMessageIdImpl topicMsgId1 = new TopicMessageIdImpl("topic-partition-1", "topic", msgId1); - TopicMessageIdImpl topic2MsgId1 = new TopicMessageIdImpl("topic2-partition-1", "topic2", msgId1); - TopicMessageIdImpl topicMsgId2 = new TopicMessageIdImpl("topic-partition-2", "topic", msgId2); + TopicMessageIdImpl topicMsgId1 = new TopicMessageIdImpl("topic-partition-1", msgId1); + TopicMessageIdImpl topic2MsgId1 = new TopicMessageIdImpl("topic2-partition-1", msgId1); + TopicMessageIdImpl topicMsgId2 = new TopicMessageIdImpl("topic-partition-2", msgId2); assertEquals(topicMsgId1, topicMsgId1); assertEquals(topicMsgId1, topic2MsgId1); @@ -54,4 +55,12 @@ public void equalsTest() { assertNotEquals(topicMsgId1, topicMsgId2); } + @Test + public void testDeprecatedMethods() { + BatchMessageIdImpl msgId = new BatchMessageIdImpl(1, 2, 3, 4); + TopicMessageIdImpl topicMsgId = new TopicMessageIdImpl("topic-partition-0", "topic", msgId); + assertSame(topicMsgId.getInnerMessageId(), msgId); + assertEquals(topicMsgId.getTopicPartitionName(), topicMsgId.getOwnerTopic()); + assertEquals(topicMsgId.getTopicName(), "topic"); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java index 4ccc514e8e7f1..91ad321048226 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java @@ -29,12 +29,15 @@ import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; import io.netty.util.concurrent.DefaultThreadFactory; - +import java.time.Duration; import java.util.HashSet; import java.util.concurrent.TimeUnit; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; 
import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.awaitility.Awaitility; import org.testng.annotations.Test; public class UnAckedMessageTrackerTest { @@ -42,6 +45,8 @@ public class UnAckedMessageTrackerTest { @Test public void testAddAndRemove() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); Timer timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer", Thread.currentThread().isDaemon()), 1, TimeUnit.MILLISECONDS); when(client.timer()).thenReturn(timer); @@ -77,4 +82,50 @@ public void testAddAndRemove() { timer.stop(); } + @Test + public void testTrackChunkedMessageId() { + PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); + Timer timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer", Thread.currentThread().isDaemon()), + 1, TimeUnit.MILLISECONDS); + when(client.timer()).thenReturn(timer); + + ConsumerBase consumer = mock(ConsumerBase.class); + doNothing().when(consumer).onAckTimeoutSend(any()); + doNothing().when(consumer).redeliverUnacknowledgedMessages(any()); + ConsumerConfigurationData conf = new ConsumerConfigurationData<>(); + conf.setAckTimeoutMillis(1000); + conf.setTickDurationMillis(1000); + UnAckedMessageTracker tracker = new UnAckedMessageTracker(client, consumer, conf); + + assertTrue(tracker.isEmpty()); + assertEquals(tracker.size(), 0); + + // Build chunked message ID + MessageIdImpl[] chunkMsgIds = new MessageIdImpl[5]; + for (int i = 0; i < 5; i++) { + chunkMsgIds[i] = new MessageIdImpl(1L, i, -1); + } + ChunkMessageIdImpl chunkedMessageId = + new ChunkMessageIdImpl(chunkMsgIds[0], chunkMsgIds[chunkMsgIds.length - 1]); + + consumer.unAckedChunkedMessageIdSequenceMap = + ConcurrentOpenHashMap.newBuilder().build(); + consumer.unAckedChunkedMessageIdSequenceMap.put(chunkedMessageId, chunkMsgIds); + + // Redeliver chunked message + tracker.add(chunkedMessageId); + + Awaitility.await() + .pollInterval(Duration.ofMillis(200)) + .atMost(Duration.ofSeconds(3)) + .untilAsserted(() -> assertEquals(tracker.size(), 0)); + + // Assert that all chunk message ID are removed from unAckedChunkedMessageIdSequenceMap + assertEquals(consumer.unAckedChunkedMessageIdSequenceMap.size(), 0); + + timer.stop(); + } + } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java index 3fcd6f12b982d..85012276d5af1 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java @@ -41,20 +41,20 @@ public class ProtobufSchemaTest { "\"namespace\":\"org.apache.pulsar.client.schema.proto.Test\"," + "\"fields\":[{\"name\":\"stringField\",\"type\":{\"type\":\"string\"," + "\"avro.java.string\":\"String\"},\"default\":\"\"},{\"name\":\"doubleField\"," + - "\"type\":\"double\",\"default\":0},{\"name\":\"intField\",\"type\":\"int\"," + + "\"type\":\"double\",\"default\":0.0},{\"name\":\"intField\",\"type\":\"int\"," + "\"default\":0},{\"name\":\"testEnum\",\"type\":{\"type\":\"enum\"," + "\"name\":\"TestEnum\",\"symbols\":[\"SHARED\",\"FAILOVER\"]}," + 
"\"default\":\"SHARED\"},{\"name\":\"nestedField\"," + "\"type\":[\"null\",{\"type\":\"record\",\"name\":\"SubMessage\"," + "\"fields\":[{\"name\":\"foo\",\"type\":{\"type\":\"string\"," + "\"avro.java.string\":\"String\"},\"default\":\"\"}" + - ",{\"name\":\"bar\",\"type\":\"double\",\"default\":0}]}]" + + ",{\"name\":\"bar\",\"type\":\"double\",\"default\":0.0}]}]" + ",\"default\":null},{\"name\":\"repeatedField\",\"type\":{\"type\":\"array\"" + ",\"items\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},\"default\":[]}" + ",{\"name\":\"externalMessage\",\"type\":[\"null\",{\"type\":\"record\"" + ",\"name\":\"ExternalMessage\",\"namespace\":\"org.apache.pulsar.client.schema.proto.ExternalTest\"" + ",\"fields\":[{\"name\":\"stringField\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}," + - "\"default\":\"\"},{\"name\":\"doubleField\",\"type\":\"double\",\"default\":0}]}],\"default\":null}]}"; + "\"default\":\"\"},{\"name\":\"doubleField\",\"type\":\"double\",\"default\":0.0}]}],\"default\":null}]}"; private static final String EXPECTED_PARSING_INFO = "{\"__alwaysAllowNull\":\"true\",\"__jsr310ConversionEnabled\":\"false\"," + "\"__PARSING_INFO__\":\"[{\\\"number\\\":1,\\\"name\\\":\\\"stringField\\\",\\\"type\\\":\\\"STRING\\\"," + diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java index 8959e67023463..bfd6af37e3ea6 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java @@ -27,6 +27,7 @@ import java.util.concurrent.CompletableFuture; import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.LookupService; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.schema.AvroSchema; @@ -46,6 +47,8 @@ public class MultiVersionSchemaInfoProviderTest { @BeforeMethod public void setup() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getLookup()).thenReturn(mock(LookupService.class)); schemaProvider = new MultiVersionSchemaInfoProvider( TopicName.get("persistent://public/default/my-topic"), client); diff --git a/pulsar-common/pom.xml b/pulsar-common/pom.xml index 643b03a59c0f7..fad0b8a72df56 100644 --- a/pulsar-common/pom.xml +++ b/pulsar-common/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-common Pulsar Common Common libraries needed by client/broker/tools - ${project.groupId} pulsar-client-api ${project.version} - ${project.groupId} pulsar-client-admin-api ${project.version} - io.swagger swagger-annotations - org.slf4j slf4j-api - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.module jackson-module-parameter-names - com.fasterxml.jackson.datatype jackson-datatype-jsr310 - com.fasterxml.jackson.datatype jackson-datatype-jdk8 - com.google.guava guava - io.netty netty-handler - io.netty netty-resolver-dns - io.netty netty-transport-native-epoll linux-x86_64 - io.netty netty-transport-native-unix-common linux-x86_64 - org.apache.bookkeeper bookkeeper-common-allocator @@ -114,17 +97,14 @@ - org.apache.bookkeeper cpu-affinity - io.airlift aircompressor - org.apache.bookkeeper circe-checksum @@ -140,66 +120,54 @@ - io.netty netty-tcnative-boringssl-static - io.netty.incubator netty-incubator-transport-classes-io_uring - io.netty.incubator netty-incubator-transport-native-io_uring linux-x86_64 - io.netty.incubator netty-incubator-transport-native-io_uring linux-aarch_64 - io.netty netty-codec-haproxy - org.apache.commons commons-lang3 - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - javax.ws.rs javax.ws.rs-api - commons-io commons-io - com.github.spotbugs spotbugs-annotations provided true - com.google.protobuf - protobuf-java + protobuf-java - org.bouncycastle @@ -207,20 +175,17 @@ ${bouncycastle.bc-fips.version} test - org.lz4 lz4-java 1.5.0 test - com.github.luben zstd-jni test - org.xerial.snappy snappy-java @@ -230,19 +195,16 @@ com.google.code.gson gson - com.beust jcommander - org.awaitility awaitility test - @@ -252,7 +214,6 @@ ${pulsar.client.compiler.release} - org.gaul modernizer-maven-plugin @@ -270,7 +231,6 @@ - com.github.splunk.lightproto lightproto-maven-plugin @@ -283,7 +243,6 @@ - pl.project13.maven git-commit-id-plugin @@ -296,6 +255,7 @@ + false true git false @@ -307,7 +267,6 @@ - org.codehaus.mojo templating-maven-plugin diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/MessageIdAdv.java b/pulsar-common/src/main/java/org/apache/pulsar/client/api/MessageIdAdv.java similarity index 100% rename from pulsar-client-api/src/main/java/org/apache/pulsar/client/api/MessageIdAdv.java rename to pulsar-common/src/main/java/org/apache/pulsar/client/api/MessageIdAdv.java diff --git a/pulsar-common/src/main/java/org/apache/pulsar/client/api/package-info.java b/pulsar-common/src/main/java/org/apache/pulsar/client/api/package-info.java new file mode 100644 index 0000000000000..3f6d1d56e1032 --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/client/api/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +/** + * Additional helper classes to the pulsar-client-api module. + */ +package org.apache.pulsar.client.api; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java index 526b0c96be05b..881ad424669d2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java @@ -210,7 +210,7 @@ public static String jsonifySchemaInfo(SchemaInfo schemaInfo) { } /** - * Jsonify the schema info with verison. + * Jsonify the schema info with version. * * @param schemaInfoWithVersion the schema info * @return the jsonified schema info with version @@ -359,8 +359,7 @@ private static byte[] getKeyOrValueSchemaBytes(JsonElement jsonElement) { * @param keyValueSchemaInfoDataJsonBytes the key/value schema info data json bytes * @return the key/value schema info data bytes */ - public static byte[] convertKeyValueDataStringToSchemaInfoSchema( - byte[] keyValueSchemaInfoDataJsonBytes) throws IOException { + public static byte[] convertKeyValueDataStringToSchemaInfoSchema(byte[] keyValueSchemaInfoDataJsonBytes) { JsonObject jsonObject = (JsonObject) toJsonElement(new String(keyValueSchemaInfoDataJsonBytes, UTF_8)); byte[] keyBytes = getKeyOrValueSchemaBytes(jsonObject.get("key")); byte[] valueBytes = getKeyOrValueSchemaBytes(jsonObject.get("value")); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java index 8e2f51c4edbf0..a02208396fc91 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java @@ -102,7 +102,7 @@ public interface RawMessage { Optional getKey(); /** - * Get the schema verison of the message. + * Get the schema version of the message. 
* * @return the schema version of the message */ diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/SystemTopicNames.java b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/SystemTopicNames.java index 8fc7d014b5784..716d9bc31facb 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/SystemTopicNames.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/SystemTopicNames.java @@ -81,7 +81,7 @@ public static boolean isTopicPoliciesSystemTopic(String topic) { if (topic == null) { return false; } - return TopicName.get(topic).getLocalName().equals(NAMESPACE_EVENTS_LOCAL_NAME); + return TopicName.getPartitionedTopicName(topic).getLocalName().equals(NAMESPACE_EVENTS_LOCAL_NAME); } public static boolean isTransactionInternalName(TopicName topicName) { @@ -92,7 +92,7 @@ public static boolean isTransactionInternalName(TopicName topicName) { } public static boolean isSystemTopic(TopicName topicName) { - TopicName nonePartitionedTopicName = TopicName.get(topicName.getPartitionedTopicName()); - return isEventSystemTopic(nonePartitionedTopicName) || isTransactionInternalName(nonePartitionedTopicName); + TopicName nonPartitionedTopicName = TopicName.get(topicName.getPartitionedTopicName()); + return isEventSystemTopic(nonPartitionedTopicName) || isTransactionInternalName(nonPartitionedTopicName); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java index bab9c443ceaeb..69d3aaeeebb4d 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java @@ -339,6 +339,42 @@ public String getPersistenceNamingEncoding() { } } + /** + * get topic full name from managedLedgerName. + * + * @return the topic full name, format -> domain://tenant/namespace/topic + */ + public static String fromPersistenceNamingEncoding(String mlName) { + // The managedLedgerName convention is: tenant/namespace/domain/topic + // We want to transform to topic full name in the order: domain://tenant/namespace/topic + if (mlName == null || mlName.length() == 0) { + return mlName; + } + List parts = Splitter.on("/").splitToList(mlName); + String tenant; + String cluster; + String namespacePortion; + String domain; + String localName; + if (parts.size() == 4) { + tenant = parts.get(0); + cluster = null; + namespacePortion = parts.get(1); + domain = parts.get(2); + localName = parts.get(3); + return String.format("%s://%s/%s/%s", domain, tenant, namespacePortion, localName); + } else if (parts.size() == 5) { + tenant = parts.get(0); + cluster = parts.get(1); + namespacePortion = parts.get(2); + domain = parts.get(3); + localName = parts.get(4); + return String.format("%s://%s/%s/%s/%s", domain, tenant, cluster, namespacePortion, localName); + } else { + throw new IllegalArgumentException("Invalid managedLedger name: " + mlName); + } + } + /** * Get a string suitable for completeTopicName lookup. 
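// Illustrative aside, not part of the diff: the new TopicName.fromPersistenceNamingEncoding
// helper reverses the managed-ledger name layout described above. Concrete values, derived
// directly from the format strings in the method (the inputs are made up):
//
//   // modern 4-part form: tenant/namespace/domain/topic
//   TopicName.fromPersistenceNamingEncoding("public/default/persistent/my-topic")
//       // -> "persistent://public/default/my-topic"
//
//   // legacy 5-part form with a cluster: tenant/cluster/namespace/domain/topic
//   TopicName.fromPersistenceNamingEncoding("public/us-west/default/persistent/my-topic")
//       // -> "persistent://public/us-west/default/my-topic"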
* diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java index 620e1156d3555..9736d8b47ef71 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java @@ -154,6 +154,11 @@ public NarClassLoader run() { }); } + public static List getClasspathFromArchive(File narPath, String narExtractionDirectory) throws IOException { + File unpacked = NarUnpacker.unpackNar(narPath, getNarExtractionDirectory(narExtractionDirectory)); + return getClassPathEntries(unpacked); + } + private static File getNarExtractionDirectory(String configuredDirectory) { return new File(configuredDirectory + "/" + TMP_DIR_PREFIX); } @@ -164,16 +169,11 @@ private static File getNarExtractionDirectory(String configuredDirectory) { * @param narWorkingDirectory * directory to explode nar contents to * @param parent - * @throws IllegalArgumentException - * if the NAR is missing the Java Services API file for FlowFileProcessor implementations. - * @throws ClassNotFoundException - * if any of the FlowFileProcessor implementations defined by the Java Services API cannot be - * loaded. * @throws IOException * if an error occurs while loading the NAR. */ private NarClassLoader(final File narWorkingDirectory, Set additionalJars, ClassLoader parent) - throws ClassNotFoundException, IOException { + throws IOException { super(new URL[0], parent); this.narWorkingDirectory = narWorkingDirectory; @@ -238,22 +238,31 @@ public List getServiceImplementation(String serviceName) throws IOExcept * if the URL list could not be updated. */ private void updateClasspath(File root) throws IOException { - addURL(root.toURI().toURL()); // for compiled classes, META-INF/, etc. 
+ getClassPathEntries(root).forEach(f -> { + try { + addURL(f.toURI().toURL()); + } catch (IOException e) { + log.error("Failed to add entry to classpath: {}", f, e); + } + }); + } + static List getClassPathEntries(File root) { + List classPathEntries = new ArrayList<>(); + classPathEntries.add(root); File dependencies = new File(root, "META-INF/bundled-dependencies"); if (!dependencies.isDirectory()) { - log.warn("{} does not contain META-INF/bundled-dependencies!", narWorkingDirectory); + log.warn("{} does not contain META-INF/bundled-dependencies!", root); } - addURL(dependencies.toURI().toURL()); + classPathEntries.add(dependencies); if (dependencies.isDirectory()) { final File[] jarFiles = dependencies.listFiles(JAR_FILTER); if (jarFiles != null) { Arrays.sort(jarFiles, Comparator.comparing(File::getName)); - for (File libJar : jarFiles) { - addURL(libJar.toURI().toURL()); - } + classPathEntries.addAll(Arrays.asList(jarFiles)); } } + return classPathEntries; } @Override diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java index 9bd5bc48df819..1e34c3e4fe706 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java @@ -32,13 +32,14 @@ import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; +import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Base64; import java.util.Enumeration; import java.util.concurrent.ConcurrentHashMap; -import java.util.jar.JarEntry; -import java.util.jar.JarFile; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; import lombok.extern.slf4j.Slf4j; /** @@ -113,18 +114,24 @@ static File doUnpackNar(final File nar, final File baseWorkingDirectory, Runnabl * if the NAR could not be unpacked. */ private static void unpack(final File nar, final File workingDirectory) throws IOException { - try (JarFile jarFile = new JarFile(nar)) { - Enumeration jarEntries = jarFile.entries(); - while (jarEntries.hasMoreElements()) { - JarEntry jarEntry = jarEntries.nextElement(); - String name = jarEntry.getName(); - File f = new File(workingDirectory, name); - if (jarEntry.isDirectory()) { + Path workingDirectoryPath = workingDirectory.toPath().normalize(); + try (ZipFile zipFile = new ZipFile(nar)) { + Enumeration zipEntries = zipFile.entries(); + while (zipEntries.hasMoreElements()) { + ZipEntry zipEntry = zipEntries.nextElement(); + String name = zipEntry.getName(); + Path targetFilePath = workingDirectoryPath.resolve(name).normalize(); + if (!targetFilePath.startsWith(workingDirectoryPath)) { + log.error("Invalid zip file with entry '{}'", name); + throw new IOException("Invalid zip file. 
Aborting unpacking."); + } + File f = targetFilePath.toFile(); + if (zipEntry.isDirectory()) { FileUtils.ensureDirectoryExistAndCanReadAndWrite(f); } else { // The directory entry might appear after the file entry FileUtils.ensureDirectoryExistAndCanReadAndWrite(f.getParentFile()); - makeFile(jarFile.getInputStream(jarEntry), f); + makeFile(zipFile.getInputStream(zipEntry), f); } } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java index fb33e3198aa60..51e181811c228 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java @@ -30,8 +30,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; import lombok.Data; import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -61,16 +64,30 @@ public class OffloadPoliciesImpl implements Serializable, OffloadPolicies { CONFIGURATION_FIELDS = Collections.unmodifiableList(temp); } + public static final ImmutableList INTERNAL_SUPPORTED_DRIVER = ImmutableList.of("S3", + "aws-s3", "google-cloud-storage", "filesystem", "azureblob", "aliyun-oss"); + public static final ImmutableList DRIVER_NAMES; + static { + String extraDrivers = System.getProperty("pulsar.extra.offload.drivers", ""); + if (extraDrivers.trim().isEmpty()) { + DRIVER_NAMES = INTERNAL_SUPPORTED_DRIVER; + } else { + DRIVER_NAMES = ImmutableList.builder() + .addAll(INTERNAL_SUPPORTED_DRIVER) + .addAll(Arrays.stream(StringUtils.split(extraDrivers, ',')) + .map(String::trim).collect(Collectors.toSet())).build(); + } + } + public static final int DEFAULT_MAX_BLOCK_SIZE_IN_BYTES = 64 * 1024 * 1024; // 64MB public static final int DEFAULT_READ_BUFFER_SIZE_IN_BYTES = 1024 * 1024; // 1MB public static final int DEFAULT_OFFLOAD_MAX_THREADS = 2; public static final int DEFAULT_OFFLOAD_MAX_PREFETCH_ROUNDS = 1; - public static final ImmutableList DRIVER_NAMES = ImmutableList - .of("S3", "aws-s3", "google-cloud-storage", "filesystem", "azureblob", "aliyun-oss"); public static final String DEFAULT_OFFLOADER_DIRECTORY = "./offloaders"; public static final Long DEFAULT_OFFLOAD_THRESHOLD_IN_BYTES = null; public static final Long DEFAULT_OFFLOAD_THRESHOLD_IN_SECONDS = null; public static final Long DEFAULT_OFFLOAD_DELETION_LAG_IN_MILLIS = null; + public static final String EXTRA_CONFIG_PREFIX = "managedLedgerOffloadExtraConfig"; public static final String OFFLOAD_THRESHOLD_NAME_IN_CONF_FILE = "managedLedgerOffloadAutoTriggerSizeThresholdBytes"; @@ -104,7 +121,9 @@ public class OffloadPoliciesImpl implements Serializable, OffloadPolicies { @Configuration @JsonProperty(access = JsonProperty.Access.READ_WRITE) private OffloadedReadPriority managedLedgerOffloadedReadPriority = DEFAULT_OFFLOADED_READ_PRIORITY; - + @Configuration + @JsonProperty(access = JsonProperty.Access.READ_WRITE) + private Map managedLedgerExtraConfigurations = new HashMap<>(); // s3 config, set by service configuration or cli @Configuration @JsonProperty(access = JsonProperty.Access.READ_WRITE) @@ -230,8 +249,7 @@ public static OffloadPoliciesImpl create(String driver, String region, String bu public static OffloadPoliciesImpl create(Properties properties) { 
OffloadPoliciesImpl data = new OffloadPoliciesImpl(); - Field[] fields = OffloadPoliciesImpl.class.getDeclaredFields(); - Arrays.stream(fields).forEach(f -> { + for (Field f : CONFIGURATION_FIELDS) { if (properties.containsKey(f.getName())) { try { f.setAccessible(true); @@ -242,7 +260,16 @@ public static OffloadPoliciesImpl create(Properties properties) { f.getName(), properties.get(f.getName())), e); } } - }); + } + + Map extraConfigurations = properties.entrySet().stream() + .filter(entry -> entry.getKey().toString().startsWith(EXTRA_CONFIG_PREFIX)) + .collect(Collectors.toMap( + entry -> entry.getKey().toString().replaceFirst(EXTRA_CONFIG_PREFIX, ""), + entry -> entry.getValue().toString())); + + data.getManagedLedgerExtraConfigurations().putAll(extraConfigurations); + data.compatibleWithBrokerConfigFile(properties); return data; } @@ -320,64 +347,21 @@ public boolean bucketValid() { public Properties toProperties() { Properties properties = new Properties(); - setProperty(properties, "managedLedgerOffloadedReadPriority", this.getManagedLedgerOffloadedReadPriority()); - setProperty(properties, "offloadersDirectory", this.getOffloadersDirectory()); - setProperty(properties, "managedLedgerOffloadDriver", this.getManagedLedgerOffloadDriver()); - setProperty(properties, "managedLedgerOffloadMaxThreads", - this.getManagedLedgerOffloadMaxThreads()); - setProperty(properties, "managedLedgerOffloadPrefetchRounds", - this.getManagedLedgerOffloadPrefetchRounds()); - setProperty(properties, "managedLedgerOffloadThresholdInBytes", - this.getManagedLedgerOffloadThresholdInBytes()); - setProperty(properties, "managedLedgerOffloadThresholdInSeconds", - this.getManagedLedgerOffloadThresholdInSeconds()); - setProperty(properties, "managedLedgerOffloadDeletionLagInMillis", - this.getManagedLedgerOffloadDeletionLagInMillis()); - - if (this.isS3Driver()) { - setProperty(properties, "s3ManagedLedgerOffloadRegion", - this.getS3ManagedLedgerOffloadRegion()); - setProperty(properties, "s3ManagedLedgerOffloadBucket", - this.getS3ManagedLedgerOffloadBucket()); - setProperty(properties, "s3ManagedLedgerOffloadServiceEndpoint", - this.getS3ManagedLedgerOffloadServiceEndpoint()); - setProperty(properties, "s3ManagedLedgerOffloadMaxBlockSizeInBytes", - this.getS3ManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "s3ManagedLedgerOffloadCredentialId", - this.getS3ManagedLedgerOffloadCredentialId()); - setProperty(properties, "s3ManagedLedgerOffloadCredentialSecret", - this.getS3ManagedLedgerOffloadCredentialSecret()); - setProperty(properties, "s3ManagedLedgerOffloadRole", - this.getS3ManagedLedgerOffloadRole()); - setProperty(properties, "s3ManagedLedgerOffloadRoleSessionName", - this.getS3ManagedLedgerOffloadRoleSessionName()); - setProperty(properties, "s3ManagedLedgerOffloadReadBufferSizeInBytes", - this.getS3ManagedLedgerOffloadReadBufferSizeInBytes()); - } else if (this.isGcsDriver()) { - setProperty(properties, "gcsManagedLedgerOffloadRegion", - this.getGcsManagedLedgerOffloadRegion()); - setProperty(properties, "gcsManagedLedgerOffloadBucket", - this.getGcsManagedLedgerOffloadBucket()); - setProperty(properties, "gcsManagedLedgerOffloadMaxBlockSizeInBytes", - this.getGcsManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "gcsManagedLedgerOffloadReadBufferSizeInBytes", - this.getGcsManagedLedgerOffloadReadBufferSizeInBytes()); - setProperty(properties, "gcsManagedLedgerOffloadServiceAccountKeyFile", - this.getGcsManagedLedgerOffloadServiceAccountKeyFile()); - } else if 
(this.isFileSystemDriver()) { - setProperty(properties, "fileSystemProfilePath", this.getFileSystemProfilePath()); - setProperty(properties, "fileSystemURI", this.getFileSystemURI()); - } - - setProperty(properties, "managedLedgerOffloadBucket", this.getManagedLedgerOffloadBucket()); - setProperty(properties, "managedLedgerOffloadRegion", this.getManagedLedgerOffloadRegion()); - setProperty(properties, "managedLedgerOffloadServiceEndpoint", - this.getManagedLedgerOffloadServiceEndpoint()); - setProperty(properties, "managedLedgerOffloadMaxBlockSizeInBytes", - this.getManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "managedLedgerOffloadReadBufferSizeInBytes", - this.getManagedLedgerOffloadReadBufferSizeInBytes()); - + for (Field f : CONFIGURATION_FIELDS) { + try { + f.setAccessible(true); + if ("managedLedgerExtraConfigurations".equals(f.getName())) { + Map extraConfig = (Map) f.get(this); + extraConfig.forEach((key, value) -> { + setProperty(properties, EXTRA_CONFIG_PREFIX + key, value); + }); + } else { + setProperty(properties, f.getName(), f.get(this)); + } + } catch (Exception e) { + throw new IllegalArgumentException("An error occurred while processing the field: " + f.getName(), e); + } + } return properties; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java index 25fa666523f3c..d77764e679da0 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.policies.data.stats; +import com.fasterxml.jackson.annotation.JsonIgnore; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; @@ -134,6 +135,7 @@ public class SubscriptionStatsImpl implements SubscriptionStats { /** The size of InMemoryDelayedDeliveryTracer memory usage. */ public long delayedMessageIndexSizeInBytes; + @JsonIgnore public Map bucketDelayedIndexStats; /** SubscriptionProperties (key/value strings) associated with this subscribe. 
*/ @@ -160,10 +162,13 @@ public void reset() { bytesOutCounter = 0; msgOutCounter = 0; msgRateRedeliver = 0; + messageAckRate = 0; + chunkedMessageRate = 0; msgBacklog = 0; backlogSize = 0; msgBacklogNoDelayed = 0; unackedMessages = 0; + type = null; msgRateExpired = 0; totalMsgExpired = 0; lastExpireTimestamp = 0L; @@ -172,6 +177,7 @@ public void reset() { consumersAfterMarkDeletePosition.clear(); nonContiguousDeletedMessagesRanges = 0; nonContiguousDeletedMessagesRangesSerializedSize = 0; + earliestMsgPublishTimeInBacklog = 0L; delayedMessageIndexSizeInBytes = 0; subscriptionProperties.clear(); filterProcessedMsgCount = 0; @@ -190,11 +196,14 @@ public SubscriptionStatsImpl add(SubscriptionStatsImpl stats) { this.bytesOutCounter += stats.bytesOutCounter; this.msgOutCounter += stats.msgOutCounter; this.msgRateRedeliver += stats.msgRateRedeliver; + this.messageAckRate += stats.messageAckRate; + this.chunkedMessageRate += stats.chunkedMessageRate; this.msgBacklog += stats.msgBacklog; this.backlogSize += stats.backlogSize; this.msgBacklogNoDelayed += stats.msgBacklogNoDelayed; this.msgDelayed += stats.msgDelayed; this.unackedMessages += stats.unackedMessages; + this.type = stats.type; this.msgRateExpired += stats.msgRateExpired; this.totalMsgExpired += stats.totalMsgExpired; this.isReplicated |= stats.isReplicated; @@ -213,6 +222,17 @@ public SubscriptionStatsImpl add(SubscriptionStatsImpl stats) { this.consumersAfterMarkDeletePosition.putAll(stats.consumersAfterMarkDeletePosition); this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges; this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize; + if (this.earliestMsgPublishTimeInBacklog != 0 && stats.earliestMsgPublishTimeInBacklog != 0) { + this.earliestMsgPublishTimeInBacklog = Math.min( + this.earliestMsgPublishTimeInBacklog, + stats.earliestMsgPublishTimeInBacklog + ); + } else { + this.earliestMsgPublishTimeInBacklog = Math.max( + this.earliestMsgPublishTimeInBacklog, + stats.earliestMsgPublishTimeInBacklog + ); + } this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes; this.subscriptionProperties.putAll(stats.subscriptionProperties); this.filterProcessedMsgCount += stats.filterProcessedMsgCount; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java index c9c4739b904f6..3bf43dbf41f0a 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java @@ -140,6 +140,7 @@ public class TopicStatsImpl implements TopicStats { public long delayedMessageIndexSizeInBytes; /** Map of bucket delayed index statistics. */ + @JsonIgnore public Map bucketDelayedIndexStats; /** The compaction stats. 
*/ @@ -215,6 +216,7 @@ public void reset() { this.lastOffloadFailureTimeStamp = 0; this.lastOffloadSuccessTimeStamp = 0; this.publishRateLimitedTimes = 0L; + this.earliestMsgPublishTimeInBacklogs = 0L; this.delayedMessageIndexSizeInBytes = 0; this.compaction.reset(); this.ownerBroker = null; @@ -314,6 +316,17 @@ public TopicStatsImpl add(TopicStats ts) { } } } + if (earliestMsgPublishTimeInBacklogs != 0 && ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs != 0) { + earliestMsgPublishTimeInBacklogs = Math.min( + earliestMsgPublishTimeInBacklogs, + ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs + ); + } else { + earliestMsgPublishTimeInBacklogs = Math.max( + earliestMsgPublishTimeInBacklogs, + ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs + ); + } return this; } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java index cf0cd820a6d10..3982900041813 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java @@ -1555,6 +1555,9 @@ public static BaseCommand newWatchTopicList( return cmd; } + /*** + * @param topics topic names which are matching, the topic name contains the partition suffix. + */ public static BaseCommand newWatchTopicListSuccess(long requestId, long watcherId, String topicsHash, List topics) { BaseCommand cmd = localCmd(Type.WATCH_TOPIC_LIST_SUCCESS); @@ -1570,6 +1573,10 @@ public static BaseCommand newWatchTopicListSuccess(long requestId, long watcherI return cmd; } + /** + * @param deletedTopics topic names deleted(contains the partition suffix). + * @param newTopics topics names added(contains the partition suffix). + */ public static BaseCommand newWatchTopicUpdate(long watcherId, List newTopics, List deletedTopics, String topicsHash) { BaseCommand cmd = localCmd(Type.WATCH_TOPIC_UPDATE); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java index 2f0a7884dde35..b4e15f8cd1d75 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java @@ -19,36 +19,63 @@ package org.apache.pulsar.common.protocol; import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.ProtocolDetectionResult; import io.netty.handler.codec.ProtocolDetectionState; import io.netty.handler.codec.haproxy.HAProxyMessageDecoder; import io.netty.handler.codec.haproxy.HAProxyProtocolVersion; +import lombok.extern.slf4j.Slf4j; /** * Decoder that added whether a new connection is prefixed with the ProxyProtocol. * More about the ProxyProtocol see: http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt. 
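Both SubscriptionStatsImpl.add and TopicStatsImpl.add above merge the earliest-publish-time fields with the same rule: zero acts as an "unknown" sentinel, so the minimum is taken only when both sides carry a real timestamp. The rule in isolation:

    // 0 means "no backlog / unknown", so:
    //  - both known -> keep the earlier timestamp (min)
    //  - one known  -> keep the known one (max picks the non-zero value)
    //  - both zero  -> stays zero
    static long mergeEarliestPublishTime(long a, long b) {
        if (a != 0 && b != 0) {
            return Math.min(a, b);
        }
        return Math.max(a, b);
    }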
*/ +@Slf4j public class OptionalProxyProtocolDecoder extends ChannelInboundHandlerAdapter { public static final String NAME = "optional-proxy-protocol-decoder"; + public static final int MIN_BYTES_SIZE_TO_DETECT_PROTOCOL = 12; + + private CompositeByteBuf cumulatedByteBuf; + @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof ByteBuf) { - ProtocolDetectionResult result = - HAProxyMessageDecoder.detectProtocol((ByteBuf) msg); - // should accumulate data if need more data to detect the protocol + // Combine cumulated buffers. + ByteBuf buf = (ByteBuf) msg; + if (cumulatedByteBuf != null) { + buf = cumulatedByteBuf.addComponent(true, buf); + } + + ProtocolDetectionResult result = HAProxyMessageDecoder.detectProtocol(buf); if (result.state() == ProtocolDetectionState.NEEDS_MORE_DATA) { + // Accumulate data if need more data to detect the protocol. + if (cumulatedByteBuf == null) { + cumulatedByteBuf = new CompositeByteBuf(ctx.alloc(), false, MIN_BYTES_SIZE_TO_DETECT_PROTOCOL, buf); + } return; } + cumulatedByteBuf = null; if (result.state() == ProtocolDetectionState.DETECTED) { ctx.pipeline().addAfter(NAME, null, new HAProxyMessageDecoder()); - ctx.pipeline().remove(this); } + ctx.pipeline().remove(this); + super.channelRead(ctx, buf); + } else { + super.channelRead(ctx, msg); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + super.channelInactive(ctx); + if (cumulatedByteBuf != null) { + log.info("Release cumulated byte buffer when channel inactive."); + cumulatedByteBuf = null; } - super.channelRead(ctx, msg); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java b/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java index 1f15beb8a0b92..8a8da0bb1ac93 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java @@ -99,6 +99,9 @@ public List generate() { Runtime r = Runtime.getRuntime(); + RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); + + m.put("jvm_start_time", runtimeMXBean.getStartTime()); m.put("jvm_heap_used", r.totalMemory() - r.freeMemory()); m.put("jvm_max_memory", r.maxMemory()); m.put("jvm_total_memory", r.totalMemory()); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicCompactionStrategy.java b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicCompactionStrategy.java index f06374b234fc7..39bfa6d71bc96 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicCompactionStrategy.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicCompactionStrategy.java @@ -18,7 +18,11 @@ */ package org.apache.pulsar.common.topics; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.classification.InterfaceAudience; +import org.apache.pulsar.common.classification.InterfaceStability; /** * Defines a custom strategy to compact messages in a topic. @@ -43,8 +47,13 @@ * "topicCompactionStrategyClassName", strategy.getClass().getCanonicalName())) * .create(); */ +@InterfaceAudience.Private +@InterfaceStability.Unstable public interface TopicCompactionStrategy { + String TABLE_VIEW_TAG = "table-view"; + Map INSTANCES = new ConcurrentHashMap<>(); + /** * Returns the schema object for this strategy. 
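The reworked decoder above buffers partial reads in a CompositeByteBuf until HAProxyMessageDecoder can decide whether the connection starts with a PROXY protocol header, then removes itself and forwards everything it accumulated. A condensed, self-contained restatement of that flow; the handler name "sniffer" and the simplified buffer allocation are assumptions of this sketch, not the patched class.

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.CompositeByteBuf;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.handler.codec.ProtocolDetectionState;
    import io.netty.handler.codec.haproxy.HAProxyMessageDecoder;

    class ProxyProtocolSniffer extends ChannelInboundHandlerAdapter {
        private CompositeByteBuf cumulated;

        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
            if (!(msg instanceof ByteBuf)) {
                super.channelRead(ctx, msg);
                return;
            }
            // Append the new read to whatever was buffered before.
            ByteBuf buf = (cumulated == null)
                    ? (ByteBuf) msg
                    : cumulated.addComponent(true, (ByteBuf) msg);
            ProtocolDetectionState state = HAProxyMessageDecoder.detectProtocol(buf).state();
            if (state == ProtocolDetectionState.NEEDS_MORE_DATA) {
                if (cumulated == null) {
                    // Start accumulating; ownership of the bytes moves to the composite.
                    cumulated = ctx.alloc().compositeBuffer().addComponent(true, buf);
                }
                return;
            }
            cumulated = null;
            if (state == ProtocolDetectionState.DETECTED) {
                ctx.pipeline().addAfter("sniffer", null, new HAProxyMessageDecoder());
            }
            ctx.pipeline().remove(this);      // detection finished either way
            super.channelRead(ctx, buf);      // hand the accumulated bytes downstream
        }
    }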
* @return @@ -60,17 +69,27 @@ public interface TopicCompactionStrategy { */ boolean shouldKeepLeft(T prev, T cur); - static TopicCompactionStrategy load(String topicCompactionStrategyClassName) { + default void handleSkippedMessage(String key, T cur) { + } + + + static TopicCompactionStrategy load(String tag, String topicCompactionStrategyClassName) { if (topicCompactionStrategyClassName == null) { return null; } + try { Class clazz = Class.forName(topicCompactionStrategyClassName); - Object instance = clazz.getDeclaredConstructor().newInstance(); - return (TopicCompactionStrategy) instance; + TopicCompactionStrategy instance = (TopicCompactionStrategy) clazz.getDeclaredConstructor().newInstance(); + INSTANCES.put(tag, instance); + return instance; } catch (Exception e) { throw new IllegalArgumentException( "Error when loading topic compaction strategy: " + topicCompactionStrategyClassName, e); } } + + static TopicCompactionStrategy getInstance(String tag) { + return INSTANCES.get(tag); + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java index 250cea217ee5f..4c0a8d500b703 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java @@ -47,13 +47,16 @@ public static List filterTopics(List original, String regex) { } public static List filterTopics(List original, Pattern topicsPattern) { - final Pattern shortenedTopicsPattern = topicsPattern.toString().contains(SCHEME_SEPARATOR) - ? Pattern.compile(SCHEME_SEPARATOR_PATTERN.split(topicsPattern.toString())[1]) : topicsPattern; + final Pattern shortenedTopicsPattern = Pattern.compile(removeTopicDomainScheme(topicsPattern.toString())); return original.stream() .map(TopicName::get) + .filter(topicName -> { + String partitionedTopicName = topicName.getPartitionedTopicName(); + String removedScheme = SCHEME_SEPARATOR_PATTERN.split(partitionedTopicName)[1]; + return shortenedTopicsPattern.matcher(removedScheme).matches(); + }) .map(TopicName::toString) - .filter(topic -> shortenedTopicsPattern.matcher(SCHEME_SEPARATOR_PATTERN.split(topic)[1]).matches()) .collect(Collectors.toList()); } @@ -78,4 +81,16 @@ public static Set minus(Collection list1, Collection lis s1.removeAll(list2); return s1; } + + private static String removeTopicDomainScheme(String originalRegexp) { + if (!originalRegexp.toString().contains(SCHEME_SEPARATOR)) { + return originalRegexp; + } + String removedTopicDomain = SCHEME_SEPARATOR_PATTERN.split(originalRegexp.toString())[1]; + if (originalRegexp.contains("^")) { + return String.format("^%s", removedTopicDomain); + } else { + return removedTopicDomain; + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FieldParser.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FieldParser.java index 626a14b92eedd..8d1ae5294ff7b 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FieldParser.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FieldParser.java @@ -314,6 +314,9 @@ public static Float stringToFloat(String val) { * @return The converted list with type {@code }. 
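TopicList.filterTopics above now strips the topic-domain scheme from the supplied pattern (preserving a leading "^" anchor when present) and matches it against the partitioned parent topic name, so individual partitions of a matching topic are included. A worked example of just the matching step; the regex and topic names are illustrative.

    import java.util.regex.Pattern;

    class TopicFilterExample {
        public static void main(String[] args) {
            // Pattern "persistent://public/default/topic-.*" with the scheme removed:
            Pattern shortened = Pattern.compile("public/default/topic-.*");

            // Candidate "persistent://public/default/topic-1-partition-0":
            // its partitioned (parent) name is persistent://public/default/topic-1,
            // and with the scheme removed it becomes:
            String parentWithoutScheme = "public/default/topic-1";

            System.out.println(shortened.matcher(parentWithoutScheme).matches()); // true
        }
    }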
*/ public static List stringToList(String val, Class type) { + if (val == null) { + return null; + } String[] tokens = trim(val).split(","); return Arrays.stream(tokens).map(t -> { return convert(trim(t), type); @@ -330,6 +333,9 @@ public static List stringToList(String val, Class type) { * @return The converted set with type {@code }. */ public static Set stringToSet(String val, Class type) { + if (val == null) { + return null; + } String[] tokens = trim(val).split(","); return Arrays.stream(tokens).map(t -> { return convert(trim(t), type); @@ -337,6 +343,9 @@ public static Set stringToSet(String val, Class type) { } private static Map stringToMap(String strValue, Class keyType, Class valueType) { + if (strValue == null) { + return null; + } String[] tokens = trim(strValue).split(","); Map map = new HashMap<>(); for (String token : tokens) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java index 2b082b4a7899b..6f62589853593 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.util; +import com.google.common.util.concurrent.MoreExecutors; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -55,6 +56,11 @@ public static CompletableFuture waitForAll(Collection runWithCurrentThread(Runnable runnable) { + return CompletableFuture.runAsync( + () -> runnable.run(), MoreExecutors.directExecutor()); + } + public static CompletableFuture> waitForAll(Stream>> futures) { return futures.reduce(CompletableFuture.completedFuture(new ArrayList<>()), (pre, curr) -> pre.thenCompose(preV -> curr.thenApply(currV -> { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/LazyLoadableValue.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/LazyLoadableValue.java new file mode 100644 index 0000000000000..063d434a64fc0 --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/LazyLoadableValue.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util; + +import java.util.function.Supplier; + +/*** + * Used to lazy load a value, only calculate it when used. Not thread-safety. 
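FutureUtil.runWithCurrentThread above executes the Runnable on the calling thread via MoreExecutors.directExecutor() while still returning a CompletableFuture for composition. The same idea expressed with only JDK types, as a sketch rather than the Pulsar helper:

    import java.util.concurrent.CompletableFuture;

    class DirectRunSketch {
        // Runnable::run satisfies Executor, so the task runs in place on the caller's thread.
        static CompletableFuture<Void> runOnCallingThread(Runnable task) {
            return CompletableFuture.runAsync(task, Runnable::run);
        }

        public static void main(String[] args) {
            String caller = Thread.currentThread().getName();
            runOnCallingThread(() ->
                    System.out.println("ran on " + Thread.currentThread().getName()
                            + " (caller was " + caller + ")"))
                    .join();
        }
    }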
+ */ +public class LazyLoadableValue { + + private Supplier loader; + + private T value; + + public LazyLoadableValue(Supplier loader) { + this.loader = loader; + } + + public T getValue() { + if (value == null) { + value = loader.get(); + } + return value; + } +} diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java index 4ecb29b2462cc..17199664f12d1 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java @@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import lombok.Builder; +import lombok.extern.slf4j.Slf4j; /** * A Rate Limiter that distributes permits at a configurable rate. Each {@link #acquire()} blocks if necessary until a @@ -50,6 +51,7 @@ *
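LazyLoadableValue above defers the supplier until the first getValue() call and then caches the result; as its javadoc notes, it is not thread-safe. A usage sketch, assuming the class is generic (LazyLoadableValue<T>) as its Supplier-based constructor implies:

    import org.apache.pulsar.common.util.LazyLoadableValue;

    class LazyValueUsage {
        public static void main(String[] args) {
            LazyLoadableValue<String> value = new LazyLoadableValue<>(() -> {
                System.out.println("computed once");
                return "expensive-result";
            });
            System.out.println(value.getValue());   // triggers the computation
            System.out.println(value.getValue());   // returns the cached result
        }
    }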

 * <li>Faster: RateLimiter is light-weight and faster than Guava-RateLimiter
  • * */ +@Slf4j public class RateLimiter implements AutoCloseable{ private final ScheduledExecutorService executorService; private long rateTime; @@ -175,7 +177,10 @@ public synchronized boolean tryAcquire() { * @return {@code true} if the permits were acquired, {@code false} otherwise */ public synchronized boolean tryAcquire(long acquirePermit) { - checkArgument(!isClosed(), "Rate limiter is already shutdown"); + if (isClosed()) { + log.info("The current rate limiter is already shutdown, acquire permits directly."); + return true; + } // lazy init and start task only once application start using it if (renewTask == null) { renewTask = createTask(); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ShutdownUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ShutdownUtil.java index b461cfab0d1bc..ff12484707123 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ShutdownUtil.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ShutdownUtil.java @@ -47,8 +47,11 @@ public class ShutdownUtil { * @see Runtime#halt(int) */ public static void triggerImmediateForcefulShutdown(int status) { + triggerImmediateForcefulShutdown(status, true); + } + public static void triggerImmediateForcefulShutdown(int status, boolean logging) { try { - if (status != 0) { + if (status != 0 && logging) { log.warn("Triggering immediate shutdown of current process with status {}", status, new Exception("Stacktrace for immediate shutdown")); } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java index 12ce7eb74c72b..b801d5f2b05a1 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java @@ -216,6 +216,14 @@ public static BitSetRecyclable valueOf(byte[] bytes) { return BitSetRecyclable.valueOf(ByteBuffer.wrap(bytes)); } + /** + * Copy a BitSetRecyclable. + */ + public static BitSetRecyclable valueOf(BitSetRecyclable src) { + // The internal implementation will do the array-copy. + return valueOf(src.words); + } + /** * Returns a new bit set containing all the bits in the given byte * buffer between its position and limit. 
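The tryAcquire change above replaces the hard checkArgument failure with a log line and an unconditional grant once the limiter has been closed, so callers racing with shutdown are let through instead of getting an exception. The policy in isolation; this is a standalone sketch, not the Pulsar class, and the accounting line is a placeholder:

    class ClosablePermitGate {
        private volatile boolean closed;
        private long availablePermits = 10;

        void close() { closed = true; }

        synchronized boolean tryAcquire(long permits) {
            if (closed) {
                // Closed limiter: log and grant instead of throwing.
                System.out.println("rate limiter already shut down, acquiring directly");
                return true;
            }
            if (availablePermits >= permits) {   // placeholder for the real token-bucket logic
                availablePermits -= permits;
                return true;
            }
            return false;
        }
    }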
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java index 31b4cb7cbf152..f5b47e7f1ab7a 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java @@ -306,16 +306,17 @@ private static final class Section extends StampedLock { } V get(long key, int keyHash) { - int bucket = keyHash; - long stamp = tryOptimisticRead(); boolean acquiredLock = false; + // add local variable here, so OutOfBound won't happen + long[] keys = this.keys; + V[] values = this.values; + // calculate table.length as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, values.length); + try { while (true) { - int capacity = this.capacity; - bucket = signSafeMod(bucket, capacity); - // First try optimistic locking long storedKey = keys[bucket]; V storedValue = values[bucket]; @@ -333,16 +334,15 @@ V get(long key, int keyHash) { if (!acquiredLock) { stamp = readLock(); acquiredLock = true; + + // update local variable + keys = this.keys; + values = this.values; + bucket = signSafeMod(keyHash, values.length); storedKey = keys[bucket]; storedValue = values[bucket]; } - if (capacity != this.capacity) { - // There has been a rehashing. We need to restart the search - bucket = keyHash; - continue; - } - if (storedKey == key) { return storedValue != DeletedValue ? storedValue : null; } else if (storedValue == EmptyValue) { @@ -350,8 +350,7 @@ V get(long key, int keyHash) { return null; } } - - ++bucket; + bucket = (bucket + 1) & (values.length - 1); } } finally { if (acquiredLock) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java index c0ccad9b73d5b..c3babbb8d1103 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java @@ -284,6 +284,9 @@ public Map asMap() { // A section is a portion of the hash map that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 4 continuous array space. 
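The ConcurrentLongHashMap.get fix above snapshots this.keys and this.values into locals so a concurrent rehash, which swaps in new arrays, can no longer push the optimistic probe out of bounds, and it advances the probe with a power-of-two mask instead of an unbounded ++bucket. The probe step by itself:

    final class ProbeStep {
        // Table lengths are powers of two, so wrapping the linear probe is a bit-mask.
        static int nextBucket(int bucket, int tableLength) {
            return (bucket + 1) & (tableLength - 1);   // same as (bucket + 1) % tableLength
        }
        // e.g. tableLength 8: 5 -> 6 -> 7 -> 0 -> 1 ...
    }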
+ private static final int ITEM_SIZE = 4; + // Keys and values are stored interleaved in the table array private volatile long[] table; @@ -306,7 +309,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new long[4 * this.capacity]; + this.table = new long[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -322,7 +325,10 @@ private static final class Section extends StampedLock { LongPair get(long key1, long key2, int keyHash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(keyHash, capacity); + // add local variable here, so OutOfBound won't happen + long[] table = this.table; + // calculate table.length / 4 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); try { while (true) { @@ -345,8 +351,9 @@ LongPair get(long key1, long key2, int keyHash) { if (!acquiredLock) { stamp = readLock(); acquiredLock = true; - - bucket = signSafeMod(keyHash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); storedKey1 = table[bucket]; storedKey2 = table[bucket + 1]; storedValue1 = table[bucket + 2]; @@ -361,7 +368,7 @@ LongPair get(long key1, long key2, int keyHash) { } } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -413,7 +420,7 @@ boolean put(long key1, long key2, long value1, long value2, int keyHash, boolean } } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -454,7 +461,7 @@ private boolean remove(long key1, long key2, long value1, long value2, int keyHa return false; } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { @@ -480,7 +487,7 @@ private boolean remove(long key1, long key2, long value1, long value2, int keyHa } private void cleanBucket(int bucket) { - int nextInArray = (bucket + 4) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyKey) { table[bucket] = EmptyKey; table[bucket + 1] = EmptyKey; @@ -489,7 +496,7 @@ private void cleanBucket(int bucket) { --usedBuckets; // Cleanup all the buckets that were in `DeletedKey` state, so that we can reduce unnecessary expansions - bucket = (bucket - 4) & (table.length - 1); + bucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[bucket] == DeletedKey) { table[bucket] = EmptyKey; table[bucket + 1] = EmptyKey; @@ -497,7 +504,7 @@ private void cleanBucket(int bucket) { table[bucket + 3] = ValueNotFound; --usedBuckets; - bucket = (bucket - 4) & (table.length - 1); + bucket = (bucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedKey; @@ -540,7 +547,7 @@ public void forEach(BiConsumerLongPair processor) { } // Go through all the buckets for this section - for (int bucket = 0; bucket < table.length; bucket += 4) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { long storedKey1 = table[bucket]; long storedKey2 = table[bucket + 1]; long storedValue1 = table[bucket + 2]; @@ -569,11 +576,11 @@ public void forEach(BiConsumerLongPair processor) { } private void rehash(int newCapacity) { - long[] newTable = new long[4 * 
newCapacity]; + long[] newTable = new long[ITEM_SIZE * newCapacity]; Arrays.fill(newTable, EmptyKey); // Re-hash table - for (int i = 0; i < table.length; i += 4) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { long storedKey1 = table[i]; long storedKey2 = table[i + 1]; long storedValue1 = table[i + 2]; @@ -593,7 +600,7 @@ private void rehash(int newCapacity) { } private void shrinkToInitCapacity() { - long[] newTable = new long[4 * initCapacity]; + long[] newTable = new long[ITEM_SIZE * initCapacity]; Arrays.fill(newTable, EmptyKey); table = newTable; @@ -622,7 +629,7 @@ private static void insertKeyValueNoLock(long[] table, int capacity, long key1, return; } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -641,6 +648,8 @@ static final long hash(long key1, long key2) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 4, so the index is the multiple of 4 + // that is to left shift 2 bits return (int) (n & (max - 1)) << 2; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java index 2a1090503857c..f0f04c4edf904 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java @@ -294,6 +294,9 @@ public Set items(int numberOfItems, LongPairFunction longPairConverter // A section is a portion of the hash map that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 2 continuous array space. 
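The signSafeMod comment added above is easier to see with numbers: the hash is first masked down to a logical bucket, then shifted left by two because every entry of this Section occupies ITEM_SIZE = 4 consecutive longs (key1, key2, value1, value2). For example:

    final class SignSafeModExample {
        // Copy of the 4-slot variant shown above.
        static int signSafeMod(long n, int max) {
            return (int) (n & (max - 1)) << 2;
        }

        public static void main(String[] args) {
            // capacity 8, hash 11: 11 & 7 = 3 (logical bucket), 3 << 2 = 12 (array index)
            System.out.println(signSafeMod(11, 8));   // prints 12
        }
    }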
+ private static final int ITEM_SIZE = 2; + // Keys and values are stored interleaved in the table array private volatile long[] table; @@ -315,7 +318,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new long[2 * this.capacity]; + this.table = new long[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -331,7 +334,11 @@ private static final class Section extends StampedLock { boolean contains(long item1, long item2, int hash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(hash, capacity); + + // add local variable here, so OutOfBound won't happen + long[] table = this.table; + // calculate table.length / 2 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(hash, table.length / ITEM_SIZE); try { while (true) { @@ -353,7 +360,9 @@ boolean contains(long item1, long item2, int hash) { stamp = readLock(); acquiredLock = true; - bucket = signSafeMod(hash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(hash, table.length / ITEM_SIZE); storedItem1 = table[bucket]; storedItem2 = table[bucket + 1]; } @@ -366,7 +375,7 @@ boolean contains(long item1, long item2, int hash) { } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -410,7 +419,7 @@ boolean add(long item1, long item2, long hash) { } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -445,7 +454,7 @@ private boolean remove(long item1, long item2, int hash) { return false; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { tryShrinkThenUnlock(stamp); @@ -459,7 +468,7 @@ private int removeIf(LongPairPredicate filter) { // Go through all the buckets for this section long stamp = writeLock(); try { - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { long storedItem1 = table[bucket]; long storedItem2 = table[bucket + 1]; if (storedItem1 != DeletedItem && storedItem1 != EmptyItem) { @@ -498,7 +507,7 @@ private void tryShrinkThenUnlock(long stamp) { } private void cleanBucket(int bucket) { - int nextInArray = (bucket + 2) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyItem) { table[bucket] = EmptyItem; table[bucket + 1] = EmptyItem; @@ -506,13 +515,13 @@ private void cleanBucket(int bucket) { // Cleanup all the buckets that were in `DeletedItem` state, // so that we can reduce unnecessary expansions - int lastBucket = (bucket - 2) & (table.length - 1); + int lastBucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[lastBucket] == DeletedItem) { table[lastBucket] = EmptyItem; table[lastBucket + 1] = EmptyItem; --usedBuckets; - lastBucket = (lastBucket - 2) & (table.length - 1); + lastBucket = (lastBucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedItem; @@ -542,7 +551,7 @@ public void forEach(LongPairConsumer processor) { // Go through all the buckets for this section. We try to renew the stamp only after a validation // error, otherwise we keep going with the same. 
long stamp = 0; - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { if (stamp == 0) { stamp = tryOptimisticRead(); } @@ -572,11 +581,11 @@ public void forEach(LongPairConsumer processor) { private void rehash(int newCapacity) { // Expand the hashmap - long[] newTable = new long[2 * newCapacity]; + long[] newTable = new long[ITEM_SIZE * newCapacity]; Arrays.fill(newTable, EmptyItem); // Re-hash table - for (int i = 0; i < table.length; i += 2) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { long storedItem1 = table[i]; long storedItem2 = table[i + 1]; if (storedItem1 != EmptyItem && storedItem1 != DeletedItem) { @@ -595,7 +604,7 @@ private void rehash(int newCapacity) { private void shrinkToInitCapacity() { // Expand the hashmap - long[] newTable = new long[2 * initCapacity]; + long[] newTable = new long[ITEM_SIZE * initCapacity]; Arrays.fill(newTable, EmptyItem); table = newTable; @@ -621,7 +630,7 @@ private static void insertKeyValueNoLock(long[] table, int capacity, long item1, return; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -640,6 +649,8 @@ static final long hash(long key1, long key2) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 2, so the index is the multiple of 2 + // that is to left shift 1 bit return (int) (n & (max - 1)) << 1; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java index ea2e01768ac7e..1aa95d3090eb2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java @@ -296,6 +296,9 @@ public List values() { // A section is a portion of the hash map that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 2 continuous array space. 
+ private static final int ITEM_SIZE = 2; + // Keys and values are stored interleaved in the table array private volatile Object[] table; @@ -317,7 +320,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new Object[2 * this.capacity]; + this.table = new Object[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -332,7 +335,11 @@ private static final class Section extends StampedLock { V get(K key, int keyHash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(keyHash, capacity); + + // add local variable here, so OutOfBound won't happen + Object[] table = this.table; + // calculate table.length / 2 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); try { while (true) { @@ -354,7 +361,9 @@ V get(K key, int keyHash) { stamp = readLock(); acquiredLock = true; - bucket = signSafeMod(keyHash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); storedKey = (K) table[bucket]; storedValue = (V) table[bucket + 1]; } @@ -367,7 +376,7 @@ V get(K key, int keyHash) { } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -420,7 +429,7 @@ V put(K key, V value, int keyHash, boolean onlyIfAbsent, Function valuePro } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -449,7 +458,7 @@ private V remove(K key, Object value, int keyHash) { if (value == null || value.equals(storedValue)) { SIZE_UPDATER.decrementAndGet(this); - int nextInArray = (bucket + 2) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyKey) { table[bucket] = EmptyKey; table[bucket + 1] = null; @@ -457,13 +466,13 @@ private V remove(K key, Object value, int keyHash) { // Cleanup all the buckets that were in `DeletedKey` state, // so that we can reduce unnecessary expansions - int lastBucket = (bucket - 2) & (table.length - 1); + int lastBucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[lastBucket] == DeletedKey) { table[lastBucket] = EmptyKey; table[lastBucket + 1] = null; --usedBuckets; - lastBucket = (lastBucket - 2) & (table.length - 1); + lastBucket = (lastBucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedKey; @@ -479,7 +488,7 @@ private V remove(K key, Object value, int keyHash) { return null; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { @@ -528,7 +537,7 @@ public void forEach(BiConsumer processor) { // Go through all the buckets for this section. We try to renew the stamp only after a validation // error, otherwise we keep going with the same. 
long stamp = 0; - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { if (stamp == 0) { stamp = tryOptimisticRead(); } @@ -558,10 +567,10 @@ public void forEach(BiConsumer processor) { private void rehash(int newCapacity) { // Expand the hashmap - Object[] newTable = new Object[2 * newCapacity]; + Object[] newTable = new Object[ITEM_SIZE * newCapacity]; // Re-hash table - for (int i = 0; i < table.length; i += 2) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { K storedKey = (K) table[i]; V storedValue = (V) table[i + 1]; if (storedKey != EmptyKey && storedKey != DeletedKey) { @@ -577,7 +586,7 @@ private void rehash(int newCapacity) { } private void shrinkToInitCapacity() { - Object[] newTable = new Object[2 * initCapacity]; + Object[] newTable = new Object[ITEM_SIZE * initCapacity]; table = newTable; size = 0; @@ -602,7 +611,7 @@ private static void insertKeyValueNoLock(Object[] table, int capacity, K return; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -618,6 +627,8 @@ static final long hash(K key) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 2, so the index is the multiple of 2 + // that is to left shift 1 bit return (int) (n & (max - 1)) << 1; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java index cc8bc07b43095..162cbed5a5ddc 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java @@ -294,16 +294,16 @@ private static final class Section extends StampedLock { } boolean contains(V value, int keyHash) { - int bucket = keyHash; - long stamp = tryOptimisticRead(); boolean acquiredLock = false; + // add local variable here, so OutOfBound won't happen + V[] values = this.values; + // calculate table.length as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, values.length); + try { while (true) { - int capacity = this.capacity; - bucket = signSafeMod(bucket, capacity); - // First try optimistic locking V storedValue = values[bucket]; @@ -321,15 +321,12 @@ boolean contains(V value, int keyHash) { stamp = readLock(); acquiredLock = true; + // update local variable + values = this.values; + bucket = signSafeMod(keyHash, values.length); storedValue = values[bucket]; } - if (capacity != this.capacity) { - // There has been a rehashing. 
We need to restart the search - bucket = keyHash; - continue; - } - if (value.equals(storedValue)) { return true; } else if (storedValue == EmptyValue) { @@ -337,8 +334,7 @@ boolean contains(V value, int keyHash) { return false; } } - - ++bucket; + bucket = (bucket + 1) & (values.length - 1); } } finally { if (acquiredLock) { diff --git a/pulsar-common/src/main/resources/findbugsExclude.xml b/pulsar-common/src/main/resources/findbugsExclude.xml index df161c4b621a7..8216948c12408 100644 --- a/pulsar-common/src/main/resources/findbugsExclude.xml +++ b/pulsar-common/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/naming/SystemTopicNamesTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/naming/SystemTopicNamesTest.java new file mode 100644 index 0000000000000..92d93021973b1 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/naming/SystemTopicNamesTest.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.common.naming; + +import static org.testng.AssertJUnit.assertEquals; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Test +public class SystemTopicNamesTest { + + @DataProvider(name = "topicPoliciesSystemTopicNames") + public static Object[][] topicPoliciesSystemTopicNames() { + return new Object[][] { + {"persistent://public/default/__change_events", true}, + {"persistent://public/default/__change_events-partition-0", true}, + {"persistent://random-tenant/random-ns/__change_events", true}, + {"persistent://random-tenant/random-ns/__change_events-partition-1", true}, + {"persistent://public/default/not_really__change_events", false}, + {"persistent://public/default/__change_events-diff-suffix", false}, + {"persistent://a/b/not_really__change_events", false}, + }; + } + + @Test(dataProvider = "topicPoliciesSystemTopicNames") + public void testIsTopicPoliciesSystemTopic(String topicName, boolean expectedResult) { + assertEquals(expectedResult, SystemTopicNames.isTopicPoliciesSystemTopic(topicName)); + assertEquals(expectedResult, SystemTopicNames.isSystemTopic(TopicName.get(topicName))); + assertEquals(expectedResult, SystemTopicNames.isEventSystemTopic(TopicName.get(topicName))); + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/naming/TopicNameTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/naming/TopicNameTest.java index 0e4533e7a08d0..6d3ef7599d30e 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/naming/TopicNameTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/naming/TopicNameTest.java @@ -236,6 +236,40 @@ public void testDecodeEncode() throws Exception { assertEquals(name.getPersistenceNamingEncoding(), "prop/colo/ns/persistent/" + encodedName); } + @Test + public void testFromPersistenceNamingEncoding() { + // case1: V2 + String mlName1 = "public_tenant/default_namespace/persistent/test_topic"; + String expectedTopicName1 = "persistent://public_tenant/default_namespace/test_topic"; + + TopicName name1 = TopicName.get(expectedTopicName1); + assertEquals(name1.getPersistenceNamingEncoding(), mlName1); + assertEquals(TopicName.fromPersistenceNamingEncoding(mlName1), expectedTopicName1); + + // case2: V1 + String mlName2 = "public_tenant/my_cluster/default_namespace/persistent/test_topic"; + String expectedTopicName2 = "persistent://public_tenant/my_cluster/default_namespace/test_topic"; + + TopicName name2 = TopicName.get(expectedTopicName2); + assertEquals(name2.getPersistenceNamingEncoding(), mlName2); + assertEquals(TopicName.fromPersistenceNamingEncoding(mlName2), expectedTopicName2); + + // case3: null + String mlName3 = ""; + String expectedTopicName3 = ""; + assertEquals(expectedTopicName3, TopicName.fromPersistenceNamingEncoding(mlName3)); + + // case4: Invalid name + try { + String mlName4 = "public_tenant/my_cluster/default_namespace/persistent/test_topic/sub_topic"; + TopicName.fromPersistenceNamingEncoding(mlName4); + fail("Should have raised exception"); + } catch (IllegalArgumentException e) { + // Exception is expected. 
+ } + } + + @SuppressWarnings("deprecation") @Test public void testTopicNameWithoutCluster() throws Exception { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java index c6c5ee180f69a..f93afac2ff93b 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java @@ -46,7 +46,7 @@ public class NarUnpackerTest { public void createSampleZipFile() throws IOException { sampleZipFile = Files.createTempFile("sample", ".zip").toFile(); try (ZipOutputStream out = new ZipOutputStream(new FileOutputStream(sampleZipFile))) { - for (int i = 0; i < 10000; i++) { + for (int i = 0; i < 5000; i++) { ZipEntry e = new ZipEntry("hello" + i + ".txt"); out.putNextEntry(e); byte[] msg = "hello world!".getBytes(StandardCharsets.UTF_8); @@ -58,12 +58,20 @@ public void createSampleZipFile() throws IOException { } @AfterMethod(alwaysRun = true) - void deleteSampleZipFile() throws IOException { - if (sampleZipFile != null) { - sampleZipFile.delete(); + void deleteSampleZipFile() { + if (sampleZipFile != null && sampleZipFile.exists()) { + try { + sampleZipFile.delete(); + } catch (Exception e) { + log.warn("Failed to delete file {}", sampleZipFile, e); + } } - if (extractDirectory != null) { - FileUtils.deleteFile(extractDirectory, true); + if (extractDirectory != null && extractDirectory.exists()) { + try { + FileUtils.deleteFile(extractDirectory, true); + } catch (IOException e) { + log.warn("Failed to delete directory {}", extractDirectory, e); + } } } @@ -111,7 +119,7 @@ public static void main(String[] args) { @Test void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { - int processes = 10; + int processes = 5; String javaExePath = findJavaExe().getAbsolutePath(); CountDownLatch countDownLatch = new CountDownLatch(processes); AtomicInteger exceptionCounter = new AtomicInteger(); @@ -122,7 +130,9 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { // fork a new process with the same classpath Process process = new ProcessBuilder() .command(javaExePath, - "-Xmx64m", + "-Xmx96m", + "-XX:TieredStopAtLevel=1", + "-Dlog4j2.disable.jmx=true", "-cp", System.getProperty("java.class.path"), // use NarUnpackerWorker as the main class @@ -130,6 +140,7 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { // pass arguments to use for testing sampleZipFile.getAbsolutePath(), extractDirectory.getAbsolutePath()) + .redirectErrorStream(true) .start(); String output = IOUtils.toString(process.getInputStream(), StandardCharsets.UTF_8); int retval = process.waitFor(); @@ -147,7 +158,7 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { } }).start(); } - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS), "All processes should finish before timeout"); assertEquals(exceptionCounter.get(), 0); assertEquals(extractCounter.get(), 1); } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java index 88036b1688437..bbede4e982044 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java +++ 
b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java @@ -18,6 +18,18 @@ */ package org.apache.pulsar.common.policies.data; +import static org.apache.pulsar.common.policies.data.OffloadPoliciesImpl.EXTRA_CONFIG_PREFIX; +import static org.testng.Assert.assertEquals; +import java.io.DataInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Method; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; import java.util.Properties; import org.testng.Assert; import org.testng.annotations.Test; @@ -365,4 +377,99 @@ public void brokerPropertyCompatibleTest() { Assert.assertEquals(offloadPolicies.getManagedLedgerOffloadDeletionLagInMillis(), brokerDeletionLag); Assert.assertEquals(offloadPolicies.getManagedLedgerOffloadedReadPriority().toString(), brokerReadPriority); } + + @Test + public void testSupportExtraOffloadDrivers() throws Exception { + System.setProperty("pulsar.extra.offload.drivers", "driverA, driverB"); + // using the custom classloader to reload the offload policies class to read the + // system property correctly. + TestClassLoader loader = new TestClassLoader(); + Class clazz = loader.loadClass("org.apache.pulsar.common.policies.data.OffloadPoliciesImpl"); + Object o = clazz.getDeclaredConstructor().newInstance(); + clazz.getDeclaredMethod("setManagedLedgerOffloadDriver", String.class).invoke(o, "driverA"); + Method method = clazz.getDeclaredMethod("driverSupported"); + Assert.assertEquals(method.invoke(o), true); + clazz.getDeclaredMethod("setManagedLedgerOffloadDriver", String.class).invoke(o, "driverB"); + Assert.assertEquals(method.invoke(o), true); + clazz.getDeclaredMethod("setManagedLedgerOffloadDriver", String.class).invoke(o, "driverC"); + Assert.assertEquals(method.invoke(o), false); + clazz.getDeclaredMethod("setManagedLedgerOffloadDriver", String.class).invoke(o, "aws-s3"); + Assert.assertEquals(method.invoke(o), true); + } + + // this is used for the testSupportExtraOffloadDrivers. Because we need to change the system property, + // we need to reload the class to read the system property. 
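testSupportExtraOffloadDrivers above reloads OffloadPoliciesImpl through a throwaway ClassLoader because DRIVER_NAMES is built in a static initializer, so the pulsar.extra.offload.drivers property is consulted exactly once per class load. The constraint it works around, reduced to a minimal example:

    final class DriversSnapshot {
        // Read once, when this class is initialized by its ClassLoader.
        static final String EXTRA =
                System.getProperty("pulsar.extra.offload.drivers", "");

        public static void main(String[] args) {
            System.setProperty("pulsar.extra.offload.drivers", "driverA");
            // EXTRA was captured during class initialization, before this method ran;
            // changing the property now has no effect unless the class is loaded
            // again by a fresh ClassLoader.
            System.out.println("EXTRA = '" + EXTRA + "'");
        }
    }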
+ static class TestClassLoader extends ClassLoader { + @Override + public Class loadClass(String name) throws ClassNotFoundException { + if (name.contains("OffloadPoliciesImpl")) { + return getClass(name); + } + return super.loadClass(name); + } + + private Class getClass(String name) { + String file = name.replace('.', File.separatorChar) + ".class"; + Path targetPath = Paths.get(getClass().getClassLoader().getResource(".").getPath()).getParent(); + file = Paths.get(targetPath.toString(), "classes", file).toString(); + byte[] byteArr = null; + try { + byteArr = loadClassData(file); + Class c = defineClass(name, byteArr, 0, byteArr.length); + resolveClass(c); + return c; + } catch (Exception e) { + e.printStackTrace(); + return null; + } + } + + private byte[] loadClassData(String name) throws IOException { + InputStream stream = Files.newInputStream(Paths.get(name)); + int size = stream.available(); + byte buff[] = new byte[size]; + DataInputStream in = new DataInputStream(stream); + // Reading the binary data + in.readFully(buff); + in.close(); + return buff; + } + } + + @Test + public void testCreateOffloadPoliciesWithExtraConfiguration() { + Properties properties = new Properties(); + properties.put(EXTRA_CONFIG_PREFIX + "Key1", "value1"); + properties.put(EXTRA_CONFIG_PREFIX + "Key2", "value2"); + OffloadPoliciesImpl policies = OffloadPoliciesImpl.create(properties); + + Map extraConfigurations = policies.getManagedLedgerExtraConfigurations(); + Assert.assertEquals(extraConfigurations.size(), 2); + Assert.assertEquals(extraConfigurations.get("Key1"), "value1"); + Assert.assertEquals(extraConfigurations.get("Key2"), "value2"); + } + + /** + * Test toProperties as well as create from properties. + * @throws Exception + */ + @Test + public void testToProperties() throws Exception { + // Base information convert. + OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.create("aws-s3", "test-region", "test-bucket", + "http://test.endpoint", null, null, null, null, 32 * 1024 * 1024, 5 * 1024 * 1024, + 10 * 1024 * 1024L, 100L, 10000L, OffloadedReadPriority.TIERED_STORAGE_FIRST); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + + // Set useless config to offload policies. Make sure convert conversion result is the same. + offloadPolicies.setFileSystemProfilePath("/test/file"); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + + // Set extra config to offload policies. Make sure convert conversion result is the same. + Map extraConfiguration = new HashMap<>(); + extraConfiguration.put("key1", "value1"); + extraConfiguration.put("key2", "value2"); + offloadPolicies.setManagedLedgerExtraConfigurations(extraConfiguration); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImplTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImplTest.java new file mode 100644 index 0000000000000..8a4b5da9edd20 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImplTest.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.policies.data.stats; + +import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; + +public class SubscriptionStatsImplTest { + + @Test + public void testReset() { + SubscriptionStatsImpl stats = new SubscriptionStatsImpl(); + stats.earliestMsgPublishTimeInBacklog = 1L; + stats.reset(); + assertEquals(stats.earliestMsgPublishTimeInBacklog, 0L); + + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Earliest() { + SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl(); + stats1.earliestMsgPublishTimeInBacklog = 10L; + + SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl(); + stats2.earliestMsgPublishTimeInBacklog = 20L; + + SubscriptionStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 10L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_First0() { + SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl(); + stats1.earliestMsgPublishTimeInBacklog = 0L; + + SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl(); + stats2.earliestMsgPublishTimeInBacklog = 20L; + + SubscriptionStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 20L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Second0() { + SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl(); + stats1.earliestMsgPublishTimeInBacklog = 10L; + + SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl(); + stats2.earliestMsgPublishTimeInBacklog = 0L; + + SubscriptionStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 10L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Zero() { + SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl(); + stats1.earliestMsgPublishTimeInBacklog = 0L; + + SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl(); + stats2.earliestMsgPublishTimeInBacklog = 0L; + + SubscriptionStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 0L); + } +} \ No newline at end of file diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImplTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImplTest.java new file mode 100644 index 0000000000000..09cef4c4d0f82 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImplTest.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.policies.data.stats; + +import static org.testng.Assert.assertEquals; +import org.testng.annotations.Test; + +public class TopicStatsImplTest { + + @Test + public void testReset() { + TopicStatsImpl stats = new TopicStatsImpl(); + stats.earliestMsgPublishTimeInBacklogs = 1L; + stats.reset(); + assertEquals(stats.earliestMsgPublishTimeInBacklogs, 0L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Earliest() { + TopicStatsImpl stats1 = new TopicStatsImpl(); + stats1.earliestMsgPublishTimeInBacklogs = 10L; + + TopicStatsImpl stats2 = new TopicStatsImpl(); + stats2.earliestMsgPublishTimeInBacklogs = 20L; + + TopicStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 10L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_First0() { + TopicStatsImpl stats1 = new TopicStatsImpl(); + stats1.earliestMsgPublishTimeInBacklogs = 0L; + + TopicStatsImpl stats2 = new TopicStatsImpl(); + stats2.earliestMsgPublishTimeInBacklogs = 20L; + + TopicStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 20L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Second0() { + TopicStatsImpl stats1 = new TopicStatsImpl(); + stats1.earliestMsgPublishTimeInBacklogs = 10L; + + TopicStatsImpl stats2 = new TopicStatsImpl(); + stats2.earliestMsgPublishTimeInBacklogs = 0L; + + TopicStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 10L); + } + + @Test + public void testAdd_EarliestMsgPublishTimeInBacklogs_Zero() { + TopicStatsImpl stats1 = new TopicStatsImpl(); + stats1.earliestMsgPublishTimeInBacklogs = 0L; + + TopicStatsImpl stats2 = new TopicStatsImpl(); + stats2.earliestMsgPublishTimeInBacklogs = 0L; + + TopicStatsImpl aggregate = stats1.add(stats2); + assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 0L); + } +} \ No newline at end of file diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java index e90b6cbc4a13a..b24e9ae40822a 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java @@ -19,12 +19,15 @@ package org.apache.pulsar.common.util; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; +import java.lang.reflect.Field; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import org.testng.annotations.Test; @@ -94,4 +97,46 @@ public static class MyConfig { public Set stringSet; } + @Test + public void testNullStrValue() throws Exception { + class TestMap { + public List list; + public Set set; + public Map map; + public Optional optional; + } + + Field listField = TestMap.class.getField("list"); + Object listValue = FieldParser.value(null, listField); + 
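The SubscriptionStatsImpl and TopicStatsImpl tests above pin down a single aggregation rule for earliestMsgPublishTimeInBacklog / earliestMsgPublishTimeInBacklogs: a value of 0 means "unset" and is ignored, otherwise the smaller (earlier) publish time wins. A minimal, self-contained sketch of that rule as exercised by the four add() cases; the helper name mergeEarliestPublishTime is illustrative and not an actual method of either stats class:

public class EarliestPublishTimeMergeSketch {

    // 0 is treated as "no backlog yet"; otherwise the earliest (smallest) timestamp is kept.
    static long mergeEarliestPublishTime(long a, long b) {
        if (a == 0) {
            return b;
        }
        if (b == 0) {
            return a;
        }
        return Math.min(a, b);
    }

    public static void main(String[] args) {
        System.out.println(mergeEarliestPublishTime(10L, 20L)); // 10 - earliest wins
        System.out.println(mergeEarliestPublishTime(0L, 20L));  // 20 - first side unset
        System.out.println(mergeEarliestPublishTime(10L, 0L));  // 10 - second side unset
        System.out.println(mergeEarliestPublishTime(0L, 0L));   // 0  - both unset
    }
}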
assertNull(listValue); + + listValue = FieldParser.value("null", listField); + assertTrue(listValue instanceof List); + assertEquals(((List) listValue).size(), 1); + assertEquals(((List) listValue).get(0), "null"); + + + Field setField = TestMap.class.getField("set"); + Object setValue = FieldParser.value(null, setField); + assertNull(setValue); + + setValue = FieldParser.value("null", setField); + assertTrue(setValue instanceof Set); + assertEquals(((Set) setValue).size(), 1); + assertEquals(((Set) setValue).iterator().next(), "null"); + + Field mapField = TestMap.class.getField("map"); + Object mapValue = FieldParser.value(null, mapField); + assertNull(mapValue); + + try { + FieldParser.value("null", mapField); + } catch (IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains("null map-value is not in correct format key1=value,key2=value2")); + } + + Field optionalField = TestMap.class.getField("optional"); + Object optionalValue = FieldParser.value(null, optionalField); + assertEquals(optionalValue, Optional.empty()); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java index 6df4494edf886..7d44c187d7355 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java @@ -181,7 +181,6 @@ public void testWaitForAny() { } } - @Test public void testSequencer() { int concurrentNum = 1000; final ScheduledExecutorService executor = Executors.newScheduledThreadPool(concurrentNum); diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java index a317fa63c0986..e1f947ad8c4f6 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java @@ -38,6 +38,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongFunction; import lombok.Cleanup; @@ -213,6 +214,66 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + try { + map.get(1); + } catch (Exception e) { + ex.set(e); + } + }); + } + + assertNull(map.put(1,"v1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertNull(map.put(2, "v2")); + 
assertNull(map.put(3, "v3")); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove(2, "v2")); + assertTrue(map.remove(3, "v3")); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() @@ -361,7 +422,6 @@ public void concurrentInsertionsAndReads() throws Throwable { assertEquals(map.size(), N * nThreads); } - @Test public void stressConcurrentInsertionsAndReads() throws Throwable { ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() .expectedItems(4) diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java index 8e74d285ffb9b..0de3fdb5c84bf 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java @@ -31,9 +31,12 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair; import org.testng.annotations.Test; @@ -173,6 +176,68 @@ public void testExpandAndShrink() { assertEquals(map.capacity(), 8); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + map.get(1, 1); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(map.put(1, 1, 11, 11)); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(map.put(2, 2, 22, 22)); + assertTrue(map.put(3, 3, 33, 33)); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove(2, 2, 22, 22)); + assertTrue(map.remove(3, 3, 33, 33)); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testExpandShrinkAndClear() { ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java index 7e947ae6e6aa3..bce2b8993835f 100644 --- 
a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java @@ -30,9 +30,11 @@ import java.util.List; import java.util.Random; import java.util.Set; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import org.apache.pulsar.common.util.collections.ConcurrentLongPairSet.LongPair; @@ -211,6 +213,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(set.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + set.contains(1, 1); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(set.add(1, 1)); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(set.add(2, 2)); + assertTrue(set.add(3, 3)); + assertEquals(set.capacity(), 8); + + // shrink hashmap + assertTrue(set.remove(2, 2)); + assertTrue(set.remove(3, 3)); + assertEquals(set.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java index 198a3f4c5c38b..410d490b98faa 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java @@ -32,11 +32,13 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import lombok.Cleanup; @@ -215,6 +217,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads 
= 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + map.get("k2"); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertNull(map.put("k1","v1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertNull(map.put("k2", "v2")); + assertNull(map.put("k3", "v3")); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove("k2", "v2")); + assertTrue(map.remove("k3", "v3")); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentOpenHashMap map = diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java index 27c18abb8b347..6a40095ab0647 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java @@ -28,9 +28,11 @@ import java.util.Collections; import java.util.List; import java.util.Random; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import org.testng.annotations.Test; @@ -186,6 +188,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentOpenHashSet set = ConcurrentOpenHashSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(set.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + set.contains("k2"); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(set.add("k1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(set.add("k2")); + assertTrue(set.add("k3")); + assertEquals(set.capacity(), 8); + + // shrink hashmap + assertTrue(set.remove("k2")); + assertTrue(set.remove("k3")); + assertEquals(set.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentOpenHashSet set = diff --git a/pulsar-config-validation/pom.xml b/pulsar-config-validation/pom.xml index 
5ce8f74d6ebad..3f5e181f7fd0e 100644 --- a/pulsar-config-validation/pom.xml +++ b/pulsar-config-validation/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar - pulsar - 3.0.0-SNAPSHOT - .. - - - pulsar-config-validation - Annotation based config validation for Pulsar - - - - org.slf4j - slf4j-api - - - - - - - org.gaul - modernizer-maven-plugin - - true - 17 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - \ No newline at end of file + + 4.0.0 + + io.streamnative + pulsar + 3.0.0-SNAPSHOT + .. + + pulsar-config-validation + Annotation based config validation for Pulsar + + + org.slf4j + slf4j-api + + + + + + org.gaul + modernizer-maven-plugin + + true + 17 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + + diff --git a/pulsar-function-go/conf/conf.go b/pulsar-function-go/conf/conf.go index c2ff443fcc40c..d52b886b540f9 100644 --- a/pulsar-function-go/conf/conf.go +++ b/pulsar-function-go/conf/conf.go @@ -50,8 +50,8 @@ type Conf struct { SecretsMap string `json:"secretsMap" yaml:"secretsMap"` Runtime int32 `json:"runtime" yaml:"runtime"` //Deprecated - AutoACK bool `json:"autoAck" yaml:"autoAck"` - Parallelism int32 `json:"parallelism" yaml:"parallelism"` + AutoACK bool `json:"autoAck" yaml:"autoAck"` + Parallelism int32 `json:"parallelism" yaml:"parallelism"` //source config SubscriptionType int32 `json:"subscriptionType" yaml:"subscriptionType"` TimeoutMs uint64 `json:"timeoutMs" yaml:"timeoutMs"` @@ -59,10 +59,16 @@ type Conf struct { CleanupSubscription bool `json:"cleanupSubscription" yaml:"cleanupSubscription"` SubscriptionPosition int32 `json:"subscriptionPosition" yaml:"subscriptionPosition"` //source input specs - SourceSpecTopic string `json:"sourceSpecsTopic" yaml:"sourceSpecsTopic"` - SourceSchemaType string `json:"sourceSchemaType" yaml:"sourceSchemaType"` - IsRegexPatternSubscription bool `json:"isRegexPatternSubscription" yaml:"isRegexPatternSubscription"` - ReceiverQueueSize int32 `json:"receiverQueueSize" yaml:"receiverQueueSize"` + SourceInputSpecs map[string]string `json:"sourceInputSpecs" yaml:"sourceInputSpecs"` + // for backward compatibility + // Deprecated + SourceSpecTopic string `json:"sourceSpecsTopic" yaml:"sourceSpecsTopic"` + // Deprecated + SourceSchemaType string `json:"sourceSchemaType" yaml:"sourceSchemaType"` + // Deprecated + IsRegexPatternSubscription bool `json:"isRegexPatternSubscription" yaml:"isRegexPatternSubscription"` + // Deprecated + ReceiverQueueSize int32 `json:"receiverQueueSize" yaml:"receiverQueueSize"` //sink spec config SinkSpecTopic string `json:"sinkSpecsTopic" yaml:"sinkSpecsTopic"` SinkSchemaType string `json:"sinkSchemaType" yaml:"sinkSchemaType"` diff --git a/pulsar-function-go/conf/conf.yaml b/pulsar-function-go/conf/conf.yaml index 59ac9bbd51308..098e33cda1f57 100644 --- a/pulsar-function-go/conf/conf.yaml +++ b/pulsar-function-go/conf/conf.yaml @@ -43,10 +43,8 @@ subscriptionName: "" cleanupSubscription: false subscriptionPosition: 1 # source input specs -sourceSpecsTopic: persistent://public/default/topic-01 -sourceSchemaType: "" -isRegexPatternSubscription: false -receiverQueueSize: 10 +sourceInputSpecs: + persistent://public/default/topic-01: 
"{\"schemaType\": \"\", \"isRegexPattern\": false, \"receiverQueueSize\": {\"value\": 10}}" # sink specs config sinkSpecsTopic: persistent://public/default/topic-02 sinkSchemaType: "" diff --git a/pulsar-function-go/pf/instance.go b/pulsar-function-go/pf/instance.go index 5d17cfe0c333a..2c2deb19b2813 100644 --- a/pulsar-function-go/pf/instance.go +++ b/pulsar-function-go/pf/instance.go @@ -149,7 +149,6 @@ func (gi *goInstance) startFunction(function function) error { defer metricsServicer.close() CLOSE: for { - idleTimer.Reset(idleDuration) select { case cm := <-channel: msgInput := cm.Message @@ -181,6 +180,11 @@ CLOSE: close(channel) break CLOSE } + // reset the idle timer and drain if appropriate before the next loop + if !idleTimer.Stop() { + <-idleTimer.C + } + idleTimer.Reset(idleDuration) } gi.closeLogTopic() @@ -404,11 +408,25 @@ func (gi *goInstance) processResult(msgInput pulsar.Message, output []byte) { // ackInputMessage doesn't produce any result, or the user doesn't want the result. func (gi *goInstance) ackInputMessage(inputMessage pulsar.Message) { log.Debugf("ack input message topic name is: %s", inputMessage.Topic()) - gi.consumers[inputMessage.Topic()].Ack(inputMessage) + gi.respondMessage(inputMessage, true) } func (gi *goInstance) nackInputMessage(inputMessage pulsar.Message) { - gi.consumers[inputMessage.Topic()].Nack(inputMessage) + gi.respondMessage(inputMessage, false) +} + +func (gi *goInstance) respondMessage(inputMessage pulsar.Message, ack bool) { + topicName, err := ParseTopicName(inputMessage.Topic()) + if err != nil { + log.Errorf("unable respond to message ID %s - invalid topic: %v", messageIDStr(inputMessage), err) + return + } + // consumers are indexed by topic name only (no partition) + if ack { + gi.consumers[topicName.NameWithoutPartition()].Ack(inputMessage) + return + } + gi.consumers[topicName.NameWithoutPartition()].Nack(inputMessage) } func getIdleTimeout(timeoutMilliSecond time.Duration) time.Duration { @@ -439,7 +457,6 @@ func (gi *goInstance) addLogTopicHandler() { }() if gi.context.logAppender == nil { - log.Error("the logAppender is nil, if you want to use it, please specify `--log-topic` at startup.") return } @@ -571,6 +588,9 @@ func (gi *goInstance) getMatchingMetricFunc() func(lbl *prometheus_client.LabelP func (gi *goInstance) getMatchingMetricFromRegistry(metricName string) prometheus_client.Metric { filteredMetricFamilies := gi.getFilteredMetricFamilies(metricName) + if len(filteredMetricFamilies) == 0 { + return prometheus_client.Metric{} + } metricFunc := gi.getMatchingMetricFunc() matchingMetric := getFirstMatch(filteredMetricFamilies[0].Metric, metricFunc) return *matchingMetric @@ -669,6 +689,9 @@ func (gi *goInstance) getTotalReceived1min() float32 { func (gi *goInstance) getUserMetricsMap() map[string]float64 { userMetricMap := map[string]float64{} filteredMetricFamilies := gi.getFilteredMetricFamilies(PulsarFunctionMetricsPrefix + UserMetric) + if len(filteredMetricFamilies) == 0 { + return userMetricMap + } for _, m := range filteredMetricFamilies[0].GetMetric() { var isFuncMetric bool var userLabelName string diff --git a/pulsar-function-go/pf/instanceConf.go b/pulsar-function-go/pf/instanceConf.go index d60beef29e8d3..9d4cabfae5a9b 100644 --- a/pulsar-function-go/pf/instanceConf.go +++ b/pulsar-function-go/pf/instanceConf.go @@ -20,6 +20,7 @@ package pf import ( + "encoding/json" "fmt" "time" @@ -44,6 +45,24 @@ type instanceConf struct { } func newInstanceConfWithConf(cfg *conf.Conf) *instanceConf { + inputSpecs := 
make(map[string]*pb.ConsumerSpec) + // for backward compatibility + if cfg.SourceSpecTopic != "" { + inputSpecs[cfg.SourceSpecTopic] = &pb.ConsumerSpec{ + SchemaType: cfg.SourceSchemaType, + IsRegexPattern: cfg.IsRegexPatternSubscription, + ReceiverQueueSize: &pb.ConsumerSpec_ReceiverQueueSize{ + Value: cfg.ReceiverQueueSize, + }, + } + } + for topic, value := range cfg.SourceInputSpecs { + spec := &pb.ConsumerSpec{} + if err := json.Unmarshal([]byte(value), spec); err != nil { + panic(fmt.Sprintf("Failed to unmarshal consume specs: %v", err)) + } + inputSpecs[topic] = spec + } instanceConf := &instanceConf{ instanceID: cfg.InstanceID, funcID: cfg.FuncID, @@ -66,16 +85,8 @@ func newInstanceConfWithConf(cfg *conf.Conf) *instanceConf { AutoAck: cfg.AutoACK, Parallelism: cfg.Parallelism, Source: &pb.SourceSpec{ - SubscriptionType: pb.SubscriptionType(cfg.SubscriptionType), - InputSpecs: map[string]*pb.ConsumerSpec{ - cfg.SourceSpecTopic: { - SchemaType: cfg.SourceSchemaType, - IsRegexPattern: cfg.IsRegexPatternSubscription, - ReceiverQueueSize: &pb.ConsumerSpec_ReceiverQueueSize{ - Value: cfg.ReceiverQueueSize, - }, - }, - }, + SubscriptionType: pb.SubscriptionType(cfg.SubscriptionType), + InputSpecs: inputSpecs, TimeoutMs: cfg.TimeoutMs, SubscriptionName: cfg.SubscriptionName, CleanupSubscription: cfg.CleanupSubscription, diff --git a/pulsar-function-go/pf/instanceControlServicer_test.go b/pulsar-function-go/pf/instanceControlServicer_test.go index 836ec6e5c7940..9344d0a591547 100644 --- a/pulsar-function-go/pf/instanceControlServicer_test.go +++ b/pulsar-function-go/pf/instanceControlServicer_test.go @@ -21,6 +21,7 @@ package pf import ( "context" + "fmt" "log" "net" "testing" @@ -76,3 +77,55 @@ func TestInstanceControlServicer_serve_creates_valid_instance(t *testing.T) { log.Printf("Response: %+v", resp.Success) assert.Equal(t, resp.Success, true) } + +func instanceCommunicationClient(t *testing.T, instance *goInstance) pb.InstanceControlClient { + t.Helper() + + if instance == nil { + t.Fatalf("cannot create communication client for nil instance") + } + + var ( + ctx context.Context = context.Background() + cf context.CancelFunc + ) + + if testDeadline, ok := t.Deadline(); ok { + ctx, cf = context.WithDeadline(context.Background(), testDeadline) + t.Cleanup(cf) + } + + lis = bufconn.Listen(bufSize) + t.Cleanup(func() { + lis.Close() + }) + // create a gRPC server object + grpcServer := grpc.NewServer() + t.Cleanup(func() { + grpcServer.Stop() + }) + + servicer := InstanceControlServicer{instance} + // must register before we start the service. 
+ pb.RegisterInstanceControlServer(grpcServer, &servicer) + + // start the server + t.Logf("Serving InstanceCommunication on port %d", instance.context.GetPort()) + + go func() { + if err := grpcServer.Serve(lis); err != nil { + panic(fmt.Sprintf("grpc server exited with error: %v", err)) + } + }() + + // Now we can setup the client: + conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(getBufDialer(lis)), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Failed to dial bufnet: %v", err) + } + t.Cleanup(func() { + conn.Close() + }) + client := pb.NewInstanceControlClient(conn) + return client +} diff --git a/pulsar-function-go/pf/stats_test.go b/pulsar-function-go/pf/stats_test.go index d52b08b717377..7b415ef5eff0b 100644 --- a/pulsar-function-go/pf/stats_test.go +++ b/pulsar-function-go/pf/stats_test.go @@ -20,6 +20,7 @@ package pf import ( + "context" "fmt" "io/ioutil" "math" @@ -28,6 +29,7 @@ import ( "time" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" @@ -257,3 +259,28 @@ func TestUserMetrics(t *testing.T) { gi.close() metricsServicer.close() } + +func TestInstanceControlMetrics(t *testing.T) { + instance := newGoInstance() + t.Cleanup(instance.close) + instanceClient := instanceCommunicationClient(t, instance) + _, err := instanceClient.GetMetrics(context.Background(), &empty.Empty{}) + assert.NoError(t, err, "err communicating with instance control: %v", err) + + testLabels := []string{"userMetricControlTest1", "userMetricControlTest2"} + for _, label := range testLabels { + assert.NotContainsf(t, label, "user metrics should not yet contain %s", label) + } + + for value, label := range testLabels { + instance.context.RecordMetric(label, float64(value+1)) + } + time.Sleep(time.Second) + + metrics, err := instanceClient.GetMetrics(context.Background(), &empty.Empty{}) + assert.NoError(t, err, "err communicating with instance control: %v", err) + for value, label := range testLabels { + assert.Containsf(t, metrics.UserMetrics, label, "user metrics should contain metric %s", label) + assert.EqualValuesf(t, value+1, metrics.UserMetrics[label], "user metric %s != %d", label, value+1) + } +} diff --git a/pulsar-function-go/pf/util.go b/pulsar-function-go/pf/util.go index d5b32da841121..1d1aa1cab939f 100644 --- a/pulsar-function-go/pf/util.go +++ b/pulsar-function-go/pf/util.go @@ -21,6 +21,8 @@ package pf import ( "fmt" + + "github.com/apache/pulsar-client-go/pulsar" ) func getProperties(fullyQualifiedName string, instanceID int) map[string]string { @@ -39,3 +41,12 @@ func getDefaultSubscriptionName(tenant, namespace, name string) string { func getFullyQualifiedInstanceID(tenant, namespace, name string, instanceID int) string { return fmt.Sprintf("%s/%s/%s:%d", tenant, namespace, name, instanceID) } + +func messageIDStr(msg pulsar.Message) string { + // ::: + return fmt.Sprintf("%d:%d:%d:%d", + msg.ID().LedgerID(), + msg.ID().EntryID(), + msg.ID().PartitionIdx(), + msg.ID().BatchIdx()) +} diff --git a/pulsar-functions/api-java/pom.xml b/pulsar-functions/api-java/pom.xml index 0f6e33bc05de5..ef133487a1d3b 100644 --- a/pulsar-functions/api-java/pom.xml +++ b/pulsar-functions/api-java/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-api Pulsar Functions :: API - - org.slf4j slf4j-api - net.jodah typetools @@ -48,16 +44,13 @@ ${project.version} compile - ${project.groupId} 
pulsar-client-admin-api ${project.version} compile - - @@ -81,16 +74,15 @@ org.apache.maven.plugins maven-checkstyle-plugin - - checkstyle - verify - - check - - + + checkstyle + verify + + check + + - diff --git a/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml b/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml index 9638cfcca8da9..b74af4c8ceb2a 100644 --- a/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-instance Pulsar Functions :: Instance - - org.apache.logging.log4j @@ -46,55 +42,46 @@ org.apache.logging.log4j log4j-core - ${project.groupId} pulsar-functions-utils ${project.version} - ${project.groupId} pulsar-metadata ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-functions-secrets ${project.version} - - + ${project.groupId} pulsar-client-admin-original ${project.version} - - + ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-messagecrypto-bc ${project.parent.version} - org.apache.bookkeeper stream-storage-java-client @@ -109,24 +96,20 @@ - io.grpc grpc-stub - io.grpc grpc-all - io.perfmark perfmark-api runtime - org.apache.bookkeeper bookkeeper-common @@ -141,61 +124,50 @@ - com.yahoo.datasketches sketches-core - com.google.guava guava - com.beust jcommander - net.jodah typetools - io.prometheus simpleclient ${prometheus.version} - io.prometheus simpleclient_hotspot ${prometheus.version} - io.prometheus simpleclient_httpserver ${prometheus.version} - io.prometheus.jmx collector ${prometheus-jmx.version} - org.awaitility awaitility test - - @@ -215,7 +187,6 @@ - @@ -271,5 +242,4 @@ - diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/ContextImpl.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/ContextImpl.java index d64c5f9b52db6..5cbbcad24c7a2 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/ContextImpl.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/ContextImpl.java @@ -148,7 +148,7 @@ public ContextImpl(InstanceConfig config, Logger logger, PulsarClient client, this.clientBuilder = clientBuilder; this.client = client; this.pulsarAdmin = pulsarAdmin; - this.topicSchema = new TopicSchema(client); + this.topicSchema = new TopicSchema(client, Thread.currentThread().getContextClassLoader()); this.statsManager = statsManager; this.producerBuilder = (ProducerBuilderImpl) client.newProducer().blockIfQueueFull(true).enableBatching(true) diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/go/GoInstanceConfig.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/go/GoInstanceConfig.java index abf17bdb1656d..67fe2a41d553d 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/go/GoInstanceConfig.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/go/GoInstanceConfig.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.instance.go; +import java.util.Map; import lombok.Getter; import lombok.Setter; import org.apache.pulsar.functions.proto.Function; @@ -53,6 +54,10 @@ public class GoInstanceConfig { 
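Just above, conf.yaml switches the source input to a sourceInputSpecs map whose values are JSON-encoded consumer specs, and the Go side unmarshals each string into a pb.ConsumerSpec. The GoInstanceConfig hunk below adds the matching Java-side field (topic name mapped to a JSON string). A minimal sketch of producing one such entry with Jackson; the ConsumerSpecJson and ReceiverQueueSize holders here are illustrative stand-ins that mirror the JSON keys from the example config, not actual Pulsar classes:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

public class SourceInputSpecsSketch {

    // Illustrative holders mirroring the JSON keys from the conf.yaml example:
    // {"schemaType": "", "isRegexPattern": false, "receiverQueueSize": {"value": 10}}
    public static class ReceiverQueueSize {
        public int value;
    }

    public static class ConsumerSpecJson {
        public String schemaType = "";
        public boolean isRegexPattern = false;
        public ReceiverQueueSize receiverQueueSize = new ReceiverQueueSize();
    }

    public static void main(String[] args) throws Exception {
        ConsumerSpecJson spec = new ConsumerSpecJson();
        spec.receiverQueueSize.value = 10;

        // topic name -> JSON-encoded consumer spec, the shape carried by GoInstanceConfig.sourceInputSpecs
        Map<String, String> sourceInputSpecs = new HashMap<>();
        sourceInputSpecs.put("persistent://public/default/topic-01",
                new ObjectMapper().writeValueAsString(spec));

        System.out.println(sourceInputSpecs);
    }
}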
private boolean cleanupSubscription; private int subscriptionPosition = Function.SubscriptionPosition.LATEST.getNumber(); + // value is the json string of ConsumerSpec + private Map sourceInputSpecs; + + // for backward compatibility private String sourceSpecsTopic = ""; private String sourceSchemaType = ""; private boolean isRegexPatternSubscription; diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreImpl.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreImpl.java index df1eae9b6a7ab..bf43f18b175e7 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreImpl.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreImpl.java @@ -166,7 +166,7 @@ public CompletableFuture getAsync(String key) { data.readBytes(result); // Set position to off the buffer to the beginning, since the position after the // read is going to be end of the buffer - // If we do not rewind to the begining here, users will have to explicitly do + // If we do not rewind to the beginning here, users will have to explicitly do // this in their function code // in order to use any of the ByteBuffer operations result.position(0); diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/sink/PulsarSink.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/sink/PulsarSink.java index 8add0a78c5fff..97a0ad0a2ce17 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/sink/PulsarSink.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/sink/PulsarSink.java @@ -349,7 +349,7 @@ public PulsarSink(PulsarClient client, PulsarSinkConfig pulsarSinkConfig, Map buildPulsarSourceConsumerConfig(String t Class typeArg) { PulsarSourceConsumerConfig.PulsarSourceConsumerConfigBuilder consumerConfBuilder = PulsarSourceConsumerConfig.builder().isRegexPattern(conf.isRegexPattern()) - .receiverQueueSize(conf.getReceiverQueueSize()) - .consumerProperties(conf.getConsumerProperties()); + .receiverQueueSize(conf.getReceiverQueueSize()) + .consumerProperties(conf.getConsumerProperties()); Schema schema; if (conf.getSerdeClassName() != null && !conf.getSerdeClassName().isEmpty()) { diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/SingleConsumerPulsarSource.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/SingleConsumerPulsarSource.java index 426723804cad1..d4d3ea00b9317 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/SingleConsumerPulsarSource.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/SingleConsumerPulsarSource.java @@ -44,14 +44,12 @@ public class SingleConsumerPulsarSource extends PulsarSource { private Consumer consumer; private final List> inputConsumers = new LinkedList<>(); - public SingleConsumerPulsarSource(PulsarClient pulsarClient, - SingleConsumerPulsarSourceConfig pulsarSourceConfig, - Map properties, - ClassLoader functionClassLoader) { + public SingleConsumerPulsarSource(PulsarClient pulsarClient, SingleConsumerPulsarSourceConfig pulsarSourceConfig, + Map properties, ClassLoader functionClassLoader) { super(pulsarClient, pulsarSourceConfig, properties, functionClassLoader); this.pulsarClient = pulsarClient; this.pulsarSourceConfig = pulsarSourceConfig; - this.topicSchema = new 
TopicSchema(pulsarClient); + this.topicSchema = new TopicSchema(pulsarClient, functionClassLoader); this.properties = properties; this.functionClassLoader = functionClassLoader; } @@ -60,8 +58,7 @@ public SingleConsumerPulsarSource(PulsarClient pulsarClient, public void open(Map config, SourceContext sourceContext) throws Exception { log.info("Opening pulsar source with config: {}", pulsarSourceConfig); - Class typeArg = Reflections.loadClass(this.pulsarSourceConfig.getTypeClassName(), - this.functionClassLoader); + Class typeArg = Reflections.loadClass(this.pulsarSourceConfig.getTypeClassName(), this.functionClassLoader); checkArgument(!Void.class.equals(typeArg), "Input type of Pulsar Function cannot be Void"); diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/TopicSchema.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/TopicSchema.java index 57f49fed0cac9..a912bc7ecf103 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/TopicSchema.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/source/TopicSchema.java @@ -19,7 +19,11 @@ package org.apache.pulsar.functions.source; import io.netty.buffer.ByteBuf; +import java.net.URL; +import java.net.URLClassLoader; import java.nio.ByteBuffer; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -49,8 +53,13 @@ public class TopicSchema { private final Map> cachedSchemas = new HashMap<>(); private final PulsarClient client; - public TopicSchema(PulsarClient client) { + private final ClassLoader functionsClassloader; + + public TopicSchema(PulsarClient client, ClassLoader functionsClassloader) { this.client = client; + this.functionsClassloader = AccessController.doPrivileged( + (PrivilegedAction) () -> new URLClassLoader(new URL[0], functionsClassloader) + ); } /** @@ -244,11 +253,11 @@ private Schema newSchemaInstance(String topic, Class clazz, ConsumerCo @SuppressWarnings("unchecked") private Schema newSchemaInstance(String topic, Class clazz, String schemaTypeOrClassName, boolean input) { return newSchemaInstance(topic, clazz, new ConsumerConfig(schemaTypeOrClassName), input, - Thread.currentThread().getContextClassLoader()); + functionsClassloader); } @SuppressWarnings("unchecked") private Schema newSchemaInstance(String topic, Class clazz, ConsumerConfig conf, boolean input) { - return newSchemaInstance(topic, clazz, conf, input, Thread.currentThread().getContextClassLoader()); + return newSchemaInstance(topic, clazz, conf, input, functionsClassloader); } } diff --git a/pulsar-functions/instance/src/main/resources/findbugsExclude.xml b/pulsar-functions/instance/src/main/resources/findbugsExclude.xml index 027affbfeb2ec..4c04f27f66fbe 100644 --- a/pulsar-functions/instance/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/instance/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-api-examples-builtin Pulsar Functions :: API Examples (NAR) - ${project.groupId} @@ -37,14 +35,36 @@ ${project.version} - org.apache.nifi nifi-nar-maven-plugin + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - diff --git a/pulsar-functions/java-examples/pom.xml b/pulsar-functions/java-examples/pom.xml index 
14e1e17d282ca..e9aec9fd86cc6 100644 --- a/pulsar-functions/java-examples/pom.xml +++ b/pulsar-functions/java-examples/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-api-examples Pulsar Functions :: API Examples - org.slf4j slf4j-api - ${project.groupId} pulsar-functions-api @@ -52,7 +49,6 @@ ${project.version} - @@ -87,5 +83,4 @@ - diff --git a/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml b/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-functions/localrun-shaded/pom.xml b/pulsar-functions/localrun-shaded/pom.xml index 2626732457b00..ae64fc2bdf294 100644 --- a/pulsar-functions/localrun-shaded/pom.xml +++ b/pulsar-functions/localrun-shaded/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.0.0-SNAPSHOT - .. - - - pulsar-functions-local-runner - Pulsar Functions :: Local Runner Shaded - - - - ${project.groupId} - pulsar-functions-local-runner-original - ${project.parent.version} - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - unpack - prepare-package - - unpack - - - - - org.asynchttpclient - async-http-client - ${asynchttpclient.version} - jar - true - org/asynchttpclient/config/ahc-default.properties - ${project.build.directory}/classes - - - - - - - - - maven-antrun-plugin - - - shade-ahc-properties - prepare-package - - run - - - - - - - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - ${shadePluginPhase} - - shade - - - false - - - - - - - org.apache.pulsar:* - org.apache.bookkeeper:* - commons-*:* - org.apache.commons:* - com.fasterxml.jackson.*:* - io.netty:* - com.google.*:* - javax.servlet:* - org.reactivestreams:reactive-streams - org.apache.commons:* - io.swagger:* - org.yaml:snakeyaml - io.perfmark:* - io.prometheus:* - io.prometheus.jmx:* - javax.ws.rs:* - org.tukaani:xz - com.github.zafarkhaja:java-semver - net.java.dev.jna:* - org.apache.zookeeper:* - com.thoughtworks.paranamer:paranamer - jline:* - org.rocksdb:* - org.eclipse.jetty*:* - org.apache.avro:avro - com.beust:* - net.jodah:* - io.airlift:* - com.yahoo.datasketches:* - io.netty.resolver:* - - - - - org.apache.pulsar:pulsar-client-original - - ** - - - - org/bouncycastle/** - - - - - - + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + ${shadePluginPhase} + + shade + + + false + + + + + + + io.streamnative:* + org.apache.bookkeeper:* + commons-*:* + org.apache.commons:* + com.fasterxml.jackson.*:* + io.netty:* + com.google.*:* + javax.servlet:* + org.reactivestreams:reactive-streams + org.apache.commons:* + io.swagger:* + org.yaml:snakeyaml + io.perfmark:* + io.prometheus:* + io.prometheus.jmx:* + javax.ws.rs:* + org.tukaani:xz + com.github.zafarkhaja:java-semver + net.java.dev.jna:* + org.apache.zookeeper:* + com.thoughtworks.paranamer:paranamer + jline:* + org.rocksdb:* + org.eclipse.jetty*:* + org.apache.avro:avro + com.beust:* + net.jodah:* + io.airlift:* + com.yahoo.datasketches:* + io.netty.resolver:* + + + + + io.streamnative:pulsar-client-original + + ** + + + + org/bouncycastle/** + + + + + + - - com.google - org.apache.pulsar.functions.runtime.shaded.com.google - - - org.apache.jute - 
org.apache.pulsar.functions.runtime.shaded.org.apache.jute - - - javax.servlet - org.apache.pulsar.functions.runtime.shaded.javax.servlet - - - net.jodah - org.apache.pulsar.functions.runtime.shaded.net.jodah - - - org.lz4 - org.apache.pulsar.functions.runtime.shaded.org.lz4 - - - org.reactivestreams - org.apache.pulsar.functions.runtime.shaded.org.reactivestreams - - - org.apache.commons - org.apache.pulsar.functions.runtime.shaded.org.apache.commons - - - io.swagger - org.apache.pulsar.functions.runtime.shaded.io.swagger - - - org.yaml - org.apache.pulsar.functions.runtime.shaded.org.yaml - - - org.jctools - org.apache.pulsar.functions.runtime.shaded.org.jctools - - - com.squareup.okhttp - org.apache.pulsar.functions.runtime.shaded.com.squareup.okhttp - - - io.grpc - org.apache.pulsar.functions.runtime.shaded.io.grpc - - - io.perfmark - org.apache.pulsar.functions.runtime.shaded.io.perfmark - - - org.joda - org.apache.pulsar.functions.runtime.shaded.org.joda - - - javax.ws.rs - org.apache.pulsar.functions.runtime.shaded.javax.ws.rs - - - io.kubernetes - org.apache.pulsar.functions.runtime.shaded.io.kubernetes - - - io.opencensus - org.apache.pulsar.functions.runtime.shaded.io.opencensus - - - net.jpountz - org.apache.pulsar.functions.runtime.shaded.net.jpountz - - - commons-configuration - org.apache.pulsar.functions.runtime.shaded.commons-configuration - - - org.tukaani - org.apache.pulsar.functions.runtime.shaded.org.tukaani - - - com.github - org.apache.pulsar.functions.runtime.shaded.com.github - - - commons-io - org.apache.pulsar.functions.runtime.shaded.commons-io - - - org.apache.distributedlog - org.apache.pulsar.functions.runtime.shaded.org.apache.distributedlog - - - - com.fasterxml - org.apache.pulsar.functions.runtime.shaded.com.fasterxml - - - org.inferred - org.apache.pulsar.functions.runtime.shaded.org.inferred - - - org.apache.bookkeeper - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - - - org.bookkeeper - org.apache.pulsar.functions.runtime.shaded.org.bookkeeper - - - dlshade - org.apache.pulsar.functions.runtime.shaded.dlshade - - - - org.codehaus.jackson - org.apache.pulsar.functions.runtime.shaded.org.codehaus.jackson - - - net.java.dev.jna - org.apache.pulsar.functions.runtime.shaded.net.java.dev.jna - - - org.apache.curator - org.apache.pulsar.functions.runtime.shaded.org.apache.curator - - - javax.validation - org.apache.pulsar.functions.runtime.shaded.javax.validation - - - javax.activation - org.apache.pulsar.functions.runtime.shaded.javax.activation - - - io.prometheus - org.apache.pulsar.functions.runtime.shaded.io.prometheus - - - io.prometheus.jmx - org.apache.pulsar.functions.runtime.shaded.io.prometheus.jmx - - - org.apache.zookeeper - org.apache.pulsar.functions.runtime.shaded.org.apache.zookeeper - - - io.jsonwebtoken - org.apache.pulsar.functions.runtime.shaded.io.jsonwebtoken - - - commons-codec - org.apache.pulsar.functions.runtime.shaded.commons-codec - - - com.thoughtworks.paranamer - org.apache.pulsar.functions.runtime.shaded.com.thoughtworks.paranamer - - - org.codehaus.mojo - org.apache.pulsar.functions.runtime.shaded.org.codehaus.mojo - - - com.github.luben - org.apache.pulsar.functions.runtime.shaded.com.github.luben - - - jline - org.apache.pulsar.functions.runtime.shaded.jline - - - org.xerial.snappy - org.apache.pulsar.functions.runtime.shaded.org.xerial.snappy - - - javax.annotation - org.apache.pulsar.functions.runtime.shaded.javax.annotation - - - org.checkerframework - 
org.apache.pulsar.functions.runtime.shaded.org.checkerframework - - - org.apache.yetus - org.apache.pulsar.functions.runtime.shaded.org.apache.yetus - - - commons-cli - org.apache.pulsar.functions.runtime.shaded.commons-cli - - - commons-lang - org.apache.pulsar.functions.runtime.shaded.commons-lang - - - com.squareup.okio - org.apache.pulsar.functions.runtime.shaded.com.squareup.okio - - - org.rocksdb - org.apache.pulsar.functions.runtime.shaded.org.rocksdb - - - org.objenesis - org.apache.pulsar.functions.runtime.shaded.org.objenesis - - - org.eclipse.jetty - org.apache.pulsar.functions.runtime.shaded.org.eclipse.jetty - - - org.apache.avro - org.apache.pulsar.functions.runtime.shaded.org.apache.avro - - - avro.shaded - org.apache.pulsar.functions.runtime.shaded.avo.shaded - - - com.yahoo.datasketches - org.apache.pulsar.shaded.com.yahoo.datasketches - - - com.yahoo.sketches - org.apache.pulsar.shaded.com.yahoo.sketches - - - com.beust - org.apache.pulsar.functions.runtime.shaded.com.beust - - - - io.netty - org.apache.pulsar.functions.runtime.shaded.io.netty - - - org.hamcrest - org.apache.pulsar.functions.runtime.shaded.org.hamcrest - - - aj.org - org.apache.pulsar.functions.runtime.shaded.aj.org - - - com.scurrilous - org.apache.pulsar.functions.runtime.shaded.com.scurrilous - - - okio - org.apache.pulsar.functions.runtime.shaded.okio - - + + com.fasterxml + org.apache.pulsar.functions.runtime.shaded.com.fasterxml + + + org.inferred + org.apache.pulsar.functions.runtime.shaded.org.inferred + + + org.apache.bookkeeper + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + + + org.bookkeeper + org.apache.pulsar.functions.runtime.shaded.org.bookkeeper + + + dlshade + org.apache.pulsar.functions.runtime.shaded.dlshade + + + + org.codehaus.jackson + org.apache.pulsar.functions.runtime.shaded.org.codehaus.jackson + + + net.java.dev.jna + org.apache.pulsar.functions.runtime.shaded.net.java.dev.jna + + + org.apache.curator + org.apache.pulsar.functions.runtime.shaded.org.apache.curator + + + javax.validation + org.apache.pulsar.functions.runtime.shaded.javax.validation + + + javax.activation + org.apache.pulsar.functions.runtime.shaded.javax.activation + + + io.prometheus + org.apache.pulsar.functions.runtime.shaded.io.prometheus + + + io.prometheus.jmx + org.apache.pulsar.functions.runtime.shaded.io.prometheus.jmx + + + org.apache.zookeeper + org.apache.pulsar.functions.runtime.shaded.org.apache.zookeeper + + + io.jsonwebtoken + org.apache.pulsar.functions.runtime.shaded.io.jsonwebtoken + + + commons-codec + org.apache.pulsar.functions.runtime.shaded.commons-codec + + + com.thoughtworks.paranamer + org.apache.pulsar.functions.runtime.shaded.com.thoughtworks.paranamer + + + org.codehaus.mojo + org.apache.pulsar.functions.runtime.shaded.org.codehaus.mojo + + + com.github.luben + org.apache.pulsar.functions.runtime.shaded.com.github.luben + + + jline + org.apache.pulsar.functions.runtime.shaded.jline + + + org.xerial.snappy + org.apache.pulsar.functions.runtime.shaded.org.xerial.snappy + + + javax.annotation + org.apache.pulsar.functions.runtime.shaded.javax.annotation + + + org.checkerframework + org.apache.pulsar.functions.runtime.shaded.org.checkerframework + + + org.apache.yetus + org.apache.pulsar.functions.runtime.shaded.org.apache.yetus + + + commons-cli + org.apache.pulsar.functions.runtime.shaded.commons-cli + + + commons-lang + org.apache.pulsar.functions.runtime.shaded.commons-lang + + + com.squareup.okio + org.apache.pulsar.functions.runtime.shaded.com.squareup.okio + 
+ + org.rocksdb + org.apache.pulsar.functions.runtime.shaded.org.rocksdb + + + org.objenesis + org.apache.pulsar.functions.runtime.shaded.org.objenesis + + + org.eclipse.jetty + org.apache.pulsar.functions.runtime.shaded.org.eclipse.jetty + + + org.apache.avro + org.apache.pulsar.functions.runtime.shaded.org.apache.avro + + + avro.shaded + org.apache.pulsar.functions.runtime.shaded.avo.shaded + + + com.yahoo.datasketches + org.apache.pulsar.shaded.com.yahoo.datasketches + + + com.yahoo.sketches + org.apache.pulsar.shaded.com.yahoo.sketches + + + com.beust + org.apache.pulsar.functions.runtime.shaded.com.beust + + + + io.netty + org.apache.pulsar.functions.runtime.shaded.io.netty + + + org.hamcrest + org.apache.pulsar.functions.runtime.shaded.org.hamcrest + + + aj.org + org.apache.pulsar.functions.runtime.shaded.aj.org + + + com.scurrilous + org.apache.pulsar.functions.runtime.shaded.com.scurrilous + + + okio + org.apache.pulsar.functions.runtime.shaded.okio + + - - org.asynchttpclient - org.apache.pulsar.functions.runtime.shaded.org.asynchttpclient - - - io.airlift - org.apache.pulsar.functions.runtime.shaded.io.airlift - - - - - - - - - + + + + +
    + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + +
    +
    diff --git a/pulsar-functions/localrun/pom.xml b/pulsar-functions/localrun/pom.xml index 6dcac5c4ac2be..611ab9ecc4dd5 100644 --- a/pulsar-functions/localrun/pom.xml +++ b/pulsar-functions/localrun/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.0.0-SNAPSHOT - .. - - - pulsar-functions-local-runner-original - Pulsar Functions :: Local Runner original - - - - ${project.groupId} - pulsar-functions-runtime - ${project.parent.version} - - - - - org.apache.logging.log4j - log4j-slf4j-impl - - - org.apache.logging.log4j - log4j-api - - - org.apache.logging.log4j - log4j-core - - - - io.grpc - grpc-all - - - - io.perfmark - perfmark-api - runtime - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - check - verify - - check - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - + + 4.0.0 + + io.streamnative + pulsar-functions + 3.0.0-SNAPSHOT + .. + + pulsar-functions-local-runner-original + Pulsar Functions :: Local Runner original + + + ${project.groupId} + pulsar-functions-runtime + ${project.parent.version} + + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + io.grpc + grpc-all + + + io.perfmark + perfmark-api + runtime + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + check + verify + + check + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java b/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java index ed9b0af3b43d8..711fa33edb2a2 100644 --- a/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java +++ b/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java @@ -52,7 +52,9 @@ import lombok.Value; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.Utils; +import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.nar.FileUtils; @@ -75,8 +77,11 @@ import org.apache.pulsar.functions.secretsproviderconfigurator.SecretsProviderConfigurator; import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.utils.FunctionRuntimeCommon; +import org.apache.pulsar.functions.utils.LoadedFunctionPackage; import org.apache.pulsar.functions.utils.SinkConfigUtils; import org.apache.pulsar.functions.utils.SourceConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functioncache.FunctionCacheEntry; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.functions.FunctionUtils; @@ -357,9 +362,12 @@ public void start(boolean blocking) throws Exception { userCodeFile = functionConfig.getJar(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.FUNCTION, functionConfig.getClassName()); + 
ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), + FunctionDefinition.class); functionDetails = FunctionConfigUtils.convert( functionConfig, - FunctionConfigUtils.validateJavaFunction(functionConfig, getCurrentOrUserCodeClassLoader())); + FunctionConfigUtils.validateJavaFunction(functionConfig, validatableFunctionPackage)); } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { userCodeFile = functionConfig.getGo(); } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.PYTHON) { @@ -369,7 +377,10 @@ public void start(boolean blocking) throws Exception { } if (functionDetails == null) { - functionDetails = FunctionConfigUtils.convert(functionConfig, getCurrentOrUserCodeClassLoader()); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), + FunctionDefinition.class); + functionDetails = FunctionConfigUtils.convert(functionConfig, validatableFunctionPackage); } } else if (sourceConfig != null) { inferMissingArguments(sourceConfig); @@ -377,9 +388,10 @@ public void start(boolean blocking) throws Exception { parallelism = sourceConfig.getParallelism(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.SOURCE, sourceConfig.getClassName()); - functionDetails = SourceConfigUtils.convert( - sourceConfig, - SourceConfigUtils.validateAndExtractDetails(sourceConfig, getCurrentOrUserCodeClassLoader(), true)); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), ConnectorDefinition.class); + functionDetails = SourceConfigUtils.convert(sourceConfig, + SourceConfigUtils.validateAndExtractDetails(sourceConfig, validatableFunctionPackage, true)); } else if (sinkConfig != null) { inferMissingArguments(sinkConfig); userCodeFile = sinkConfig.getArchive(); @@ -387,6 +399,8 @@ public void start(boolean blocking) throws Exception { parallelism = sinkConfig.getParallelism(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.SINK, sinkConfig.getClassName()); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), ConnectorDefinition.class); if (isNotEmpty(sinkConfig.getTransformFunction())) { transformFunctionCodeClassLoader = extractClassLoader( sinkConfig.getTransformFunction(), @@ -395,16 +409,19 @@ public void start(boolean blocking) throws Exception { } ClassLoader functionClassLoader = null; + ValidatableFunctionPackage validatableTransformFunction = null; if (transformFunctionCodeClassLoader != null) { functionClassLoader = transformFunctionCodeClassLoader.getClassLoader() == null ? 
Thread.currentThread().getContextClassLoader() : transformFunctionCodeClassLoader.getClassLoader(); + validatableTransformFunction = + new LoadedFunctionPackage(functionClassLoader, FunctionDefinition.class); } functionDetails = SinkConfigUtils.convert( sinkConfig, - SinkConfigUtils.validateAndExtractDetails(sinkConfig, getCurrentOrUserCodeClassLoader(), - functionClassLoader, true)); + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunctionPackage, + validatableTransformFunction, true)); } else { throw new IllegalArgumentException("Must specify Function, Source or Sink config"); } @@ -472,7 +489,7 @@ private UserCodeClassLoader extractClassLoader(String userCodeFile, ComponentTyp if (classLoader == null) { if (userCodeFile != null && Utils.isFunctionPackageUrlSupported(userCodeFile)) { File file = FunctionCommon.extractFileFromPkgURL(userCodeFile); - classLoader = FunctionCommon.getClassLoaderFromPackage( + classLoader = FunctionRuntimeCommon.getClassLoaderFromPackage( componentType, className, file, narExtractionDirectory); classLoaderCreated = true; } else if (userCodeFile != null) { @@ -494,7 +511,7 @@ private UserCodeClassLoader extractClassLoader(String userCodeFile, ComponentTyp } throw new RuntimeException(errorMsg + " (" + userCodeFile + ") does not exist"); } - classLoader = FunctionCommon.getClassLoaderFromPackage( + classLoader = FunctionRuntimeCommon.getClassLoaderFromPackage( componentType, className, file, narExtractionDirectory); classLoaderCreated = true; } else { @@ -713,7 +730,7 @@ private ClassLoader isBuiltInFunction(String functionType) throws IOException { FunctionArchive function = functions.get(functionName); if (function != null && function.getFunctionDefinition().getFunctionClass() != null) { // Function type is a valid built-in type. - return function.getClassLoader(); + return function.getFunctionPackage().getClassLoader(); } else { return null; } @@ -727,7 +744,7 @@ private ClassLoader isBuiltInSource(String sourceType) throws IOException { Connector connector = connectors.get(source); if (connector != null && connector.getConnectorDefinition().getSourceClass() != null) { // Source type is a valid built-in connector type. 
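The LocalRunner hunks above replace the raw ClassLoader arguments to the shared validation utilities with a ValidatableFunctionPackage, and resolve built-in archives through their function package rather than a pre-built classloader. A condensed sketch of the new call pattern, restricted to classes and signatures visible in this diff (surrounding LocalRunner fields such as functionsDir and narExtractionDirectory, and the generic parameters of the TreeMap, are assumed from context):

// Validate a Java function against the already-loaded user-code classloader.
ValidatableFunctionPackage functionPackage =
        new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), FunctionDefinition.class);
functionDetails = FunctionConfigUtils.convert(
        functionConfig,
        FunctionConfigUtils.validateJavaFunction(functionConfig, functionPackage));

// Sources and sinks use the same wrapper, but with ConnectorDefinition as the definition type.
ValidatableFunctionPackage connectorPackage =
        new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), ConnectorDefinition.class);
functionDetails = SourceConfigUtils.convert(sourceConfig,
        SourceConfigUtils.validateAndExtractDetails(sourceConfig, connectorPackage, true));

// Built-in archives now expose their classloader through the function package, and the
// directory scans take the NAR extraction directory plus an enable-classloading flag.
TreeMap<String, FunctionArchive> functions =
        FunctionUtils.searchForFunctions(functionsDir, narExtractionDirectory, true);
ClassLoader builtinFunctionClassLoader =
        functions.get("exclamation").getFunctionPackage().getClassLoader();   // "exclamation" is illustrative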
- return connector.getClassLoader(); + return connector.getConnectorFunctionPackage().getClassLoader(); } else { return null; } @@ -741,18 +758,18 @@ private ClassLoader isBuiltInSink(String sinkType) throws IOException { Connector connector = connectors.get(sink); if (connector != null && connector.getConnectorDefinition().getSinkClass() != null) { // Sink type is a valid built-in connector type - return connector.getClassLoader(); + return connector.getConnectorFunctionPackage().getClassLoader(); } else { return null; } } private TreeMap getFunctions() throws IOException { - return FunctionUtils.searchForFunctions(functionsDir); + return FunctionUtils.searchForFunctions(functionsDir, narExtractionDirectory, true); } private TreeMap getConnectors() throws IOException { - return ConnectorUtils.searchForConnectors(connectorsDir, narExtractionDirectory); + return ConnectorUtils.searchForConnectors(connectorsDir, narExtractionDirectory, true); } private SecretsProviderConfigurator getSecretsProviderConfigurator() { diff --git a/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml b/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml index ad183eb5d9427..c7c41fe9ea252 100644 --- a/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + + diff --git a/pulsar-functions/pom.xml b/pulsar-functions/pom.xml index 3176aba9c9188..5b8ecb76ab9b5 100644 --- a/pulsar-functions/pom.xml +++ b/pulsar-functions/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-functions Pulsar Functions :: Parent - main @@ -41,24 +39,23 @@ !true - - proto - api-java - java-examples - java-examples-builtin - utils - instance - runtime - runtime-all - worker - secrets - localrun - localrun-shaded - + + proto + api-java + java-examples + java-examples-builtin + utils + instance + runtime + runtime-all + worker + secrets + localrun + localrun-shaded + - - - core-modules + + core-modules proto api-java @@ -72,8 +69,6 @@ secrets localrun - - + - diff --git a/pulsar-functions/proto/pom.xml b/pulsar-functions/proto/pom.xml index f188a147b2cee..75ad2eb4fdd3f 100644 --- a/pulsar-functions/proto/pom.xml +++ b/pulsar-functions/proto/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.0.0-SNAPSHOT - - - pulsar-functions-proto - Pulsar Functions :: Proto - - - - - com.google.protobuf - protobuf-java - - - - com.google.protobuf - protobuf-java-util - - - - javax.annotation - javax.annotation-api - - - - io.grpc - grpc-all - - - io.netty - * - - - - - - io.perfmark - perfmark-api - runtime - - - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf-maven-plugin.version} - - com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} - true - grpc-java - io.grpc:protoc-gen-grpc-java:${protoc-gen-grpc-java.version}:exe:${os.detected.classifier} - - - - generate-sources - - compile - compile-custom - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-functions + 3.0.0-SNAPSHOT + + pulsar-functions-proto + Pulsar Functions :: Proto + + + com.google.protobuf + protobuf-java + + + com.google.protobuf + protobuf-java-util + + + javax.annotation + javax.annotation-api + + + io.grpc + grpc-all + + + io.netty + * + + + + + io.perfmark + perfmark-api + runtime + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 
${protobuf-maven-plugin.version} + + com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} + true + grpc-java + io.grpc:protoc-gen-grpc-java:${protoc-gen-grpc-java.version}:exe:${os.detected.classifier} + + + + generate-sources + + compile + compile-custom + + + + + + diff --git a/pulsar-functions/runtime-all/pom.xml b/pulsar-functions/runtime-all/pom.xml index bf41c9b400be5..e3ede4d5429d8 100644 --- a/pulsar-functions/runtime-all/pom.xml +++ b/pulsar-functions/runtime-all/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT .. - - pulsar-functions-runtime-all Pulsar Functions :: Runtime All - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-client-api ${project.version} - org.apache.avro avro ${avro.version} - com.fasterxml.jackson.core jackson-databind - com.google.protobuf @@ -95,30 +85,24 @@ gson ${gson.version} - - org.slf4j slf4j-api - org.apache.logging.log4j log4j-slf4j-impl - org.apache.logging.log4j log4j-api - org.apache.logging.log4j log4j-core - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + diff --git a/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml b/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml index 190d9be92940b..84e6ce44f51e4 100644 --- a/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml +++ b/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-instance - 30 - - - pulsar.log.appender - RollingFile - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - RollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}*log.gz - - - 30d - - - - - - BkRollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}.bk*log.gz - - - 30d - - - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - BkRollingFile - - - - info - - ${sys:pulsar.log.appender} - ${sys:pulsar.log.level} - - - - \ No newline at end of file + pulsar-functions-instance + 30 + + + pulsar.log.appender + RollingFile + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + RollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? 
+ + + + + ${sys:pulsar.function.log.dir} + 2 + + */${sys:pulsar.function.log.file}*log.gz + + + 30d + + + + + + BkRollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? + + + + + ${sys:pulsar.function.log.dir} + 2 + + */${sys:pulsar.function.log.file}.bk*log.gz + + + 30d + + + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + BkRollingFile + + + + info + + ${sys:pulsar.log.appender} + ${sys:pulsar.log.level} + + + + diff --git a/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml b/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml index f86d03e41793f..fe478be5b8667 100644 --- a/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml +++ b/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-kubernetes-instance - 30 - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - Console - - - - info - - Console - ${sys:pulsar.log.level} - - - - \ No newline at end of file + pulsar-functions-kubernetes-instance + 30 + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + Console + + + + info + + Console + ${sys:pulsar.log.level} + + + + diff --git a/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java b/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java index 854c146893243..b65bf17f70b44 100644 --- a/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java +++ b/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java @@ -46,6 +46,8 @@ * 8. Apache AVRO * 9. Jackson Mapper and Databind (dependency of AVRO) * 10. Apache Commons Compress (dependency of AVRO) + * 11. Apache Commons Lang (dependency of Apache Commons Compress) + * 12. 
Apache Commons IO (dependency of Apache Commons Compress) */ public class JavaInstanceDepsTest { @@ -71,6 +73,8 @@ public void testInstanceJarDeps() throws IOException { && !name.startsWith("org/apache/avro") && !name.startsWith("com/fasterxml/jackson") && !name.startsWith("org/apache/commons/compress") + && !name.startsWith("org/apache/commons/lang3") + && !name.startsWith("org/apache/commons/io") && !name.startsWith("com/google") && !name.startsWith("org/checkerframework") && !name.startsWith("javax/annotation") diff --git a/pulsar-functions/runtime/pom.xml b/pulsar-functions/runtime/pom.xml index 5558ced4c0749..df389d4268336 100644 --- a/pulsar-functions/runtime/pom.xml +++ b/pulsar-functions/runtime/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-runtime Pulsar Functions :: Runtime - - ${project.groupId} pulsar-functions-instance ${project.version} - ${project.groupId} pulsar-functions-secrets ${project.version} - com.beust jcommander - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.core jackson-databind - io.kubernetes client-java ${kubernetesclient.version} + + + bcpkix-jdk18on + org.bouncycastle + + + bcutil-jdk18on + org.bouncycastle + + + bcprov-jdk18on + org.bouncycastle + + ${project.groupId} pulsar-broker-common ${project.version} - - @@ -92,7 +95,6 @@ - org.apache.maven.plugins @@ -140,5 +142,4 @@ - diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProvider.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProvider.java index 1053e6e170ef1..e611801250f37 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProvider.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProvider.java @@ -161,7 +161,7 @@ public void cleanUpAuthData(Function.FunctionDetails funcDetails, Optional { try { - coreClient.readNamespacedSecret(secretName, kubeNamespace, - null, null, null); + coreClient.readNamespacedSecret(secretName, kubeNamespace, null); } catch (ApiException e) { // statefulset is gone @@ -305,12 +304,13 @@ private void upsertSecret(String token, Function.FunctionDetails funcDetails, St .data(buildSecretMap(token)); try { - coreClient.createNamespacedSecret(kubeNamespace, v1Secret, null, null, null); + coreClient.createNamespacedSecret(kubeNamespace, v1Secret, null, null, null, null); } catch (ApiException e) { if (e.getCode() == HTTP_CONFLICT) { try { coreClient - .replaceNamespacedSecret(secretName, kubeNamespace, v1Secret, null, null, null); + .replaceNamespacedSecret(secretName, kubeNamespace, + v1Secret, null, null, null, null); return Actions.ActionResult.builder().success(true).build(); } catch (ApiException e1) { @@ -366,7 +366,7 @@ private String createSecret(String token, Function.FunctionDetails funcDetails) .metadata(new V1ObjectMeta().name(getSecretName(id))) .data(buildSecretMap(token)); try { - coreClient.createNamespacedSecret(kubeNamespace, v1Secret, null, null, null); + coreClient.createNamespacedSecret(kubeNamespace, v1Secret, null, null, null, null); } catch (ApiException e) { // already exists if (e.getCode() == HTTP_CONFLICT) { diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java 
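JavaInstanceDepsTest guards the shaded java-instance jar against unexpected dependencies; the new entries simply extend the allowlist with commons-lang3 and commons-io, which now arrive transitively via commons-compress. A simplified, hypothetical version of that kind of allowlist scan (the jar path and the reduced prefix list are illustrative, not the test's exact contents):

import java.util.Collections;
import java.util.List;
import java.util.jar.JarFile;
import java.util.stream.Collectors;

public class ShadedJarAllowlistCheck {
    public static void main(String[] args) throws Exception {
        try (JarFile jar = new JarFile("target/java-instance.jar")) {   // path is an assumption
            List<String> unexpected = Collections.list(jar.entries()).stream()
                    .map(java.util.jar.JarEntry::getName)
                    .filter(name -> name.endsWith(".class"))
                    .filter(name -> !name.startsWith("org/apache/pulsar")
                            && !name.startsWith("org/apache/commons/compress")
                            && !name.startsWith("org/apache/commons/lang3")   // newly allowed
                            && !name.startsWith("org/apache/commons/io"))     // newly allowed
                    .collect(Collectors.toList());
            // the real test asserts that no class entry falls outside the allowlist
            if (!unexpected.isEmpty()) {
                throw new AssertionError("Unexpected classes in shaded jar: " + unexpected);
            }
        }
    }
}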
b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java index deff690815d9c..8436cbaf42e09 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java @@ -38,6 +38,9 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.WindowConfig; import org.apache.pulsar.common.nar.NarClassLoader; @@ -49,10 +52,13 @@ import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.proto.InstanceControlGrpc; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntime; import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; import org.apache.pulsar.functions.secretsprovider.ClearTextSecretsProvider; import org.apache.pulsar.functions.secretsprovider.SecretsProvider; import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.functions.utils.functioncache.FunctionCacheManager; +import org.apache.pulsar.functions.utils.functioncache.FunctionCacheManagerImpl; @Slf4j @@ -185,7 +191,10 @@ public void start(String[] args, ClassLoader functionInstanceClassLoader, ClassL functionDetailsJsonString = functionDetailsJsonString.substring(0, functionDetailsJsonString.length() - 1); } JsonFormat.parser().merge(functionDetailsJsonString, functionDetailsBuilder); - inferringMissingTypeClassName(functionDetailsBuilder, functionInstanceClassLoader); + FunctionCacheManager fnCache = new FunctionCacheManagerImpl(rootClassLoader); + ClassLoader functionClassLoader = ThreadRuntime.loadJars(jarFile, instanceConfig, functionId, + functionDetailsBuilder.getName(), narExtractionDirectory, fnCache); + inferringMissingTypeClassName(functionDetailsBuilder, functionClassLoader); Function.FunctionDetails functionDetails = functionDetailsBuilder.build(); instanceConfig.setFunctionDetails(functionDetails); instanceConfig.setPort(port); @@ -230,7 +239,7 @@ public void start(String[] args, ClassLoader functionInstanceClassLoader, ClassL .tlsHostnameVerificationEnable(isTrue(tlsHostNameVerificationEnabled)) .tlsTrustCertsFilePath(tlsTrustCertFilePath).build(), secretsProvider, collectorRegistry, narExtractionDirectory, rootClassLoader, - exposePulsarAdminClientEnabled, webServiceUrl); + exposePulsarAdminClientEnabled, webServiceUrl, fnCache); runtimeSpawner = new RuntimeSpawner( instanceConfig, jarFile, @@ -312,7 +321,8 @@ public void close() { } private void inferringMissingTypeClassName(Function.FunctionDetails.Builder functionDetailsBuilder, - ClassLoader classLoader) throws ClassNotFoundException { + ClassLoader classLoader) { + TypePool typePool = TypePool.Default.of(ClassFileLocator.ForClassLoader.of(classLoader)); switch (functionDetailsBuilder.getComponentType()) { case FUNCTION: if ((functionDetailsBuilder.hasSource() @@ -322,7 +332,8 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func Map userConfigs = new Gson().fromJson(functionDetailsBuilder.getUserConfig(), new TypeToken>() { }.getType()); - boolean isWindowConfigPresent = userConfigs.containsKey(WindowConfig.WINDOW_CONFIG_KEY); 
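In JavaInstanceStarter, the hunk above builds the function classloader eagerly through a FunctionCacheManager and the now-public ThreadRuntime.loadJars, so type inference and the thread runtime can share one classloader instead of loading the user jar twice. The essential lines, as they appear in the hunk (surrounding fields such as jarFile, functionId and rootClassLoader come from the starter itself):

// Create the cache once, load the user jar into it, and keep the resulting classloader.
FunctionCacheManager fnCache = new FunctionCacheManagerImpl(rootClassLoader);
ClassLoader functionClassLoader = ThreadRuntime.loadJars(jarFile, instanceConfig, functionId,
        functionDetailsBuilder.getName(), narExtractionDirectory, fnCache);

// Missing type class names are now inferred against this classloader (via ByteBuddy, see below) ...
inferringMissingTypeClassName(functionDetailsBuilder, functionClassLoader);

// ... and the same fnCache is later passed to the ThreadRuntimeFactory in start(), so the
// runtime can pick up the classloader already registered for functionId.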
+ boolean isWindowConfigPresent = + userConfigs != null && userConfigs.containsKey(WindowConfig.WINDOW_CONFIG_KEY); String className = functionDetailsBuilder.getClassName(); if (isWindowConfigPresent) { WindowConfig windowConfig = new Gson().fromJson( @@ -330,14 +341,13 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func WindowConfig.class); className = windowConfig.getActualWindowFunctionClassName(); } - - Class[] typeArgs = FunctionCommon.getFunctionTypes(classLoader.loadClass(className), + TypeDefinition[] typeArgs = FunctionCommon.getFunctionTypes(typePool.describe(className).resolve(), isWindowConfigPresent); if (functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource().getTypeClassName().isEmpty() && typeArgs[0] != null) { Function.SourceSpec.Builder sourceBuilder = functionDetailsBuilder.getSource().toBuilder(); - sourceBuilder.setTypeClassName(typeArgs[0].getName()); + sourceBuilder.setTypeClassName(typeArgs[0].asErasure().getTypeName()); functionDetailsBuilder.setSource(sourceBuilder.build()); } @@ -345,7 +355,7 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func && functionDetailsBuilder.getSink().getTypeClassName().isEmpty() && typeArgs[1] != null) { Function.SinkSpec.Builder sinkBuilder = functionDetailsBuilder.getSink().toBuilder(); - sinkBuilder.setTypeClassName(typeArgs[1].getName()); + sinkBuilder.setTypeClassName(typeArgs[1].asErasure().getTypeName()); functionDetailsBuilder.setSink(sinkBuilder.build()); } } @@ -353,7 +363,9 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func case SINK: if ((functionDetailsBuilder.hasSink() && functionDetailsBuilder.getSink().getTypeClassName().isEmpty())) { - String typeArg = getSinkType(functionDetailsBuilder.getClassName(), classLoader).getName(); + String typeArg = + getSinkType(functionDetailsBuilder.getSink().getClassName(), typePool).asErasure() + .getTypeName(); Function.SinkSpec.Builder sinkBuilder = Function.SinkSpec.newBuilder(functionDetailsBuilder.getSink()); @@ -371,7 +383,9 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func case SOURCE: if ((functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource().getTypeClassName().isEmpty())) { - String typeArg = getSourceType(functionDetailsBuilder.getClassName(), classLoader).getName(); + String typeArg = + getSourceType(functionDetailsBuilder.getSource().getClassName(), typePool).asErasure() + .getTypeName(); Function.SourceSpec.Builder sourceBuilder = Function.SourceSpec.newBuilder(functionDetailsBuilder.getSource()); diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java index 53ebfcbfaf068..1a014b6f5c54a 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java @@ -38,6 +38,7 @@ import java.net.InetAddress; import java.net.URL; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -204,9 +205,15 @@ public static List getGoInstanceCmd(InstanceConfig instanceConfig, instanceConfig.getFunctionDetails().getSource().getSubscriptionPosition().getNumber()); if (instanceConfig.getFunctionDetails().getSource().getInputSpecsMap() != null) { - for (String inputTopic : 
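Type inference itself no longer calls classLoader.loadClass(...): it resolves type descriptions through a ByteBuddy TypePool backed by the function classloader, which reads class files without initializing user classes just to inspect their signatures. A minimal standalone sketch of the mechanism (the resolved class name is illustrative):

import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.ClassFileLocator;
import net.bytebuddy.pool.TypePool;

public class TypePoolSketch {
    public static void main(String[] args) {
        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();

        // Read class files through the classloader without loading or initializing the classes.
        TypePool typePool = TypePool.Default.of(ClassFileLocator.ForClassLoader.of(classLoader));

        TypeDescription described = typePool.describe("java.util.ArrayList").resolve();

        // Where the old code used Class.getName(), the patch uses the erased type name.
        System.out.println(described.asErasure().getTypeName());   // -> java.util.ArrayList
    }
}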
instanceConfig.getFunctionDetails().getSource().getInputSpecsMap().keySet()) { - goInstanceConfig.setSourceSpecsTopic(inputTopic); + Map sourceInputSpecs = new HashMap<>(); + for (Map.Entry entry : + instanceConfig.getFunctionDetails().getSource().getInputSpecsMap().entrySet()) { + String topic = entry.getKey(); + Function.ConsumerSpec spec = entry.getValue(); + sourceInputSpecs.put(topic, JsonFormat.printer().omittingInsignificantWhitespace().print(spec)); + goInstanceConfig.setSourceSpecsTopic(topic); } + goInstanceConfig.setSourceInputSpecs(sourceInputSpecs); } if (instanceConfig.getFunctionDetails().getSource().getTimeoutMs() != 0) { @@ -304,7 +311,7 @@ public static List getCmd(InstanceConfig instanceConfig, } if (StringUtils.isNotEmpty(functionInstanceClassPath)) { - args.add(String.format("-D%s=%s", FUNCTIONS_INSTANCE_CLASSPATH, functionInstanceClassPath)); + args.add(String.format("-D%s=%s", FUNCTIONS_INSTANCE_CLASSPATH, functionInstanceClassPath)); } else { // add complete classpath for broker/worker so that the function instance can load // the functions instance dependencies separately from user code dependencies diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java index e206862b68a67..77fa1cb14e65c 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java @@ -472,7 +472,7 @@ private void submitService() throws Exception { .supplier(() -> { final V1Service response; try { - response = coreClient.createNamespacedService(jobNamespace, service, null, null, null); + response = coreClient.createNamespacedService(jobNamespace, service, null, null, null, null); } catch (ApiException e) { // already exists if (e.getCode() == HTTP_CONFLICT) { @@ -561,7 +561,8 @@ private void submitStatefulSet() throws Exception { .supplier(() -> { final V1StatefulSet response; try { - response = appsClient.createNamespacedStatefulSet(jobNamespace, statefulSet, null, null, null); + response = appsClient.createNamespacedStatefulSet(jobNamespace, + statefulSet, null, null, null, null); } catch (ApiException e) { // already exists if (e.getCode() == HTTP_CONFLICT) { @@ -657,8 +658,7 @@ public void deleteStatefulSet() throws InterruptedException { .supplier(() -> { V1StatefulSet response; try { - response = appsClient.readNamespacedStatefulSet(statefulSetName, jobNamespace, - null, null, null); + response = appsClient.readNamespacedStatefulSet(statefulSetName, jobNamespace, null); } catch (ApiException e) { // statefulset is gone if (e.getCode() == HTTP_NOT_FOUND) { @@ -684,10 +684,11 @@ public void deleteStatefulSet() throws InterruptedException { .numRetries(KubernetesRuntimeFactory.numRetries * 2) .sleepBetweenInvocationsMs(KubernetesRuntimeFactory.sleepBetweenRetriesMs * 2) .supplier(() -> { + Map validLabels = getLabels(instanceConfig.getFunctionDetails()); String labels = String.format("tenant=%s,namespace=%s,name=%s", - instanceConfig.getFunctionDetails().getTenant(), - instanceConfig.getFunctionDetails().getNamespace(), - instanceConfig.getFunctionDetails().getName()); + validLabels.get("tenant"), + validLabels.get("namespace"), + validLabels.get("name")); V1PodList response; try { @@ -805,8 +806,7 @@ public void deleteService() throws 
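For the Go runtime, RuntimeUtils now forwards the complete input-spec map instead of only remembering the last topic: each protobuf ConsumerSpec is serialized to compact JSON with the protobuf-java-util JsonFormat printer. The core of the change, with the surrounding instanceConfig/goInstanceConfig objects assumed from the enclosing method (print() throws InvalidProtocolBufferException, which that method propagates):

Map<String, String> sourceInputSpecs = new HashMap<>();
for (Map.Entry<String, Function.ConsumerSpec> entry :
        instanceConfig.getFunctionDetails().getSource().getInputSpecsMap().entrySet()) {
    // one compact JSON document per input topic
    sourceInputSpecs.put(entry.getKey(),
            JsonFormat.printer().omittingInsignificantWhitespace().print(entry.getValue()));
}
goInstanceConfig.setSourceInputSpecs(sourceInputSpecs);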
InterruptedException { .supplier(() -> { V1Service response; try { - response = coreClient.readNamespacedService(serviceName, jobNamespace, - null, null, null); + response = coreClient.readNamespacedService(serviceName, jobNamespace, null); } catch (ApiException e) { // service is gone @@ -879,14 +879,24 @@ private List getDownloadCommand(String tenant, String namespace, String // add auth plugin and parameters if necessary if (authenticationEnabled && authConfig != null) { if (isNotBlank(authConfig.getClientAuthenticationPlugin()) - && isNotBlank(authConfig.getClientAuthenticationParameters()) - && instanceConfig.getFunctionAuthenticationSpec() != null) { + && isNotBlank(authConfig.getClientAuthenticationParameters())) { cmd.addAll(Arrays.asList( "--auth-plugin", authConfig.getClientAuthenticationPlugin(), "--auth-params", authConfig.getClientAuthenticationParameters())); } + if (authConfig.isTlsAllowInsecureConnection()) { + cmd.add("--tls-allow-insecure"); + } + if (authConfig.isTlsHostnameVerificationEnable()) { + cmd.add("--tls-enable-hostname-verification"); + } + if (isNotBlank(authConfig.getTlsTrustCertsFilePath())) { + cmd.addAll(Arrays.asList( + "--tls-trust-cert-path", + authConfig.getTlsTrustCertsFilePath())); + } } cmd.addAll(Arrays.asList( diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java index 895304138a530..bbb6e3992a018 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java @@ -302,6 +302,11 @@ public KubernetesRuntime createContainer(InstanceConfig instanceConfig, String c .map((customizer) -> customizer.customizeName(instanceConfig.getFunctionDetails(), jobName)) .orElse(jobName); + // pass grpcPort configured in functionRuntimeFactoryConfigs.grpcPort in functions_worker.yml + if (grpcPort != null) { + instanceConfig.setPort(grpcPort); + } + // pass metricsPort configured in functionRuntimeFactoryConfigs.metricsPort in functions_worker.yml if (metricsPort != null) { instanceConfig.setMetricsPort(metricsPort); @@ -405,7 +410,7 @@ static void fetchConfigMap(CoreV1Api coreClient, String changeConfigMap, KubernetesRuntimeFactory kubernetesRuntimeFactory) { try { V1ConfigMap v1ConfigMap = - coreClient.readNamespacedConfigMap(changeConfigMap, changeConfigMapNamespace, null, true, false); + coreClient.readNamespacedConfigMap(changeConfigMap, changeConfigMapNamespace, null); Map data = v1ConfigMap.getData(); if (data != null) { overRideKubernetesConfig(data, kubernetesRuntimeFactory); diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java index 0aa0cd95aef09..9dca4015d5ef5 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java @@ -124,27 +124,29 @@ private static ClassLoader getFunctionClassLoader(InstanceConfig instanceConfig, if (componentType == Function.FunctionDetails.ComponentType.FUNCTION && functionsManager.isPresent()) { return functionsManager.get() 
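The download command built for Kubernetes pods now attaches client auth whenever the plugin and parameters are configured (the function-level auth spec is no longer a precondition) and also forwards the worker's TLS options to pulsar-admin. Reduced to the flag handling from the hunk above, with cmd already seeded with the pulsar-admin --admin-url prefix:

if (authenticationEnabled && authConfig != null) {
    if (isNotBlank(authConfig.getClientAuthenticationPlugin())
            && isNotBlank(authConfig.getClientAuthenticationParameters())) {
        cmd.addAll(Arrays.asList(
                "--auth-plugin", authConfig.getClientAuthenticationPlugin(),
                "--auth-params", authConfig.getClientAuthenticationParameters()));
    }
    if (authConfig.isTlsAllowInsecureConnection()) {
        cmd.add("--tls-allow-insecure");
    }
    if (authConfig.isTlsHostnameVerificationEnable()) {
        cmd.add("--tls-enable-hostname-verification");
    }
    if (isNotBlank(authConfig.getTlsTrustCertsFilePath())) {
        cmd.addAll(Arrays.asList("--tls-trust-cert-path", authConfig.getTlsTrustCertsFilePath()));
    }
}
// A resulting command (per the new tests further down in this patch) looks like:
// pulsar-admin --admin-url <url> --auth-plugin com.MyAuth --auth-params {...}
//     --tls-enable-hostname-verification --tls-trust-cert-path /my/ca.pem
//     functions download --tenant <t> --namespace <ns> --name <fn> --destination-file <path>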
.getFunction(instanceConfig.getFunctionDetails().getBuiltin()) - .getClassLoader(); + .getFunctionPackage().getClassLoader(); } if (componentType == Function.FunctionDetails.ComponentType.SOURCE && connectorsManager.isPresent()) { return connectorsManager.get() .getConnector(instanceConfig.getFunctionDetails().getSource().getBuiltin()) - .getClassLoader(); + .getConnectorFunctionPackage().getClassLoader(); } if (componentType == Function.FunctionDetails.ComponentType.SINK && connectorsManager.isPresent()) { return connectorsManager.get() .getConnector(instanceConfig.getFunctionDetails().getSink().getBuiltin()) - .getClassLoader(); + .getConnectorFunctionPackage().getClassLoader(); } } - return loadJars(jarFile, instanceConfig, functionId, narExtractionDirectory, fnCache); + return loadJars(jarFile, instanceConfig, functionId, instanceConfig.getFunctionDetails().getName(), + narExtractionDirectory, fnCache); } - private static ClassLoader loadJars(String jarFile, - InstanceConfig instanceConfig, - String functionId, - String narExtractionDirectory, - FunctionCacheManager fnCache) throws Exception { + public static ClassLoader loadJars(String jarFile, + InstanceConfig instanceConfig, + String functionId, + String functionName, + String narExtractionDirectory, + FunctionCacheManager fnCache) throws Exception { if (jarFile == null) { return Thread.currentThread().getContextClassLoader(); } @@ -175,8 +177,9 @@ private static ClassLoader loadJars(String jarFile, Collections.emptyList()); } - log.info("Initialize function class loader for function {} at function cache manager, functionClassLoader: {}", - instanceConfig.getFunctionDetails().getName(), fnCache.getClassLoader(functionId)); + log.info( + "Initialize function class loader for function {} at function cache manager, functionClassLoader: {}", + functionName, fnCache.getClassLoader(functionId)); fnClassLoader = fnCache.getClassLoader(functionId); if (null == fnClassLoader) { diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java index 7bc055b25d6b9..cb9ad27a2dff8 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java @@ -86,7 +86,21 @@ public ThreadRuntimeFactory(String threadGroupName, String pulsarServiceUrl, stateStorageImplClass, storageServiceUrl, null, secretsProvider, collectorRegistry, narExtractionDirectory, rootClassLoader, exposePulsarAdminClientEnabled, pulsarWebServiceUrl, Optional.empty(), - Optional.empty()); + Optional.empty(), null); + } + + public ThreadRuntimeFactory(String threadGroupName, String pulsarServiceUrl, + String stateStorageImplClass, + String storageServiceUrl, + AuthenticationConfig authConfig, SecretsProvider secretsProvider, + FunctionCollectorRegistry collectorRegistry, String narExtractionDirectory, + ClassLoader rootClassLoader, boolean exposePulsarAdminClientEnabled, + String pulsarWebServiceUrl, FunctionCacheManager fnCache) throws Exception { + initialize(threadGroupName, Optional.empty(), pulsarServiceUrl, authConfig, + stateStorageImplClass, storageServiceUrl, null, secretsProvider, collectorRegistry, + narExtractionDirectory, + rootClassLoader, exposePulsarAdminClientEnabled, pulsarWebServiceUrl, Optional.empty(), + Optional.empty(), fnCache); } 
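ThreadRuntime now obtains built-in classloaders through the function/connector packages, and ThreadRuntimeFactory gains a constructor overload that accepts an externally created FunctionCacheManager, so JavaInstanceStarter can hand over the cache it already populated. A wiring sketch; every constructor argument other than fnCache is a placeholder taken from the surrounding code:

// Built-in lookup inside ThreadRuntime after the patch:
ClassLoader builtinClassLoader = functionsManager.get()
        .getFunction(instanceConfig.getFunctionDetails().getBuiltin())
        .getFunctionPackage().getClassLoader();

// Sharing an already-populated cache with the factory via the new overload:
FunctionCacheManager fnCache = new FunctionCacheManagerImpl(rootClassLoader);
ThreadRuntimeFactory factory = new ThreadRuntimeFactory(
        "function-thread-group",            // threadGroupName, illustrative
        pulsarServiceUrl, stateStorageImplClass, storageServiceUrl,
        authConfig, secretsProvider, collectorRegistry,
        narExtractionDirectory, rootClassLoader,
        exposePulsarAdminClientEnabled, pulsarWebServiceUrl,
        fnCache);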
private void initialize(String threadGroupName, Optional memoryLimit, @@ -96,7 +110,7 @@ private void initialize(String threadGroupName, Optional connectorsManager, - Optional functionsManager) + Optional functionsManager, FunctionCacheManager fnCache) throws PulsarClientException { if (rootClassLoader == null) { @@ -106,7 +120,10 @@ private void initialize(String threadGroupName, Optional connectors; + @VisibleForTesting + public ConnectorsManager() { + this.connectors = new TreeMap<>(); + } + public ConnectorsManager(WorkerConfig workerConfig) throws IOException { - this.connectors = ConnectorUtils - .searchForConnectors(workerConfig.getConnectorsDirectory(), workerConfig.getNarExtractionDirectory()); + this.connectors = createConnectors(workerConfig); + } + + private static TreeMap createConnectors(WorkerConfig workerConfig) throws IOException { + boolean enableClassloading = workerConfig.getEnableClassloadingOfBuiltinFiles() + || ThreadRuntimeFactory.class.getName().equals(workerConfig.getFunctionRuntimeFactoryClassName()); + return ConnectorUtils.searchForConnectors(workerConfig.getConnectorsDirectory(), + workerConfig.getNarExtractionDirectory(), enableClassloading); + } + + @VisibleForTesting + public void addConnector(String connectorType, Connector connector) { + connectors.put(connectorType, connector); } public Connector getConnector(String connectorType) { @@ -71,7 +89,25 @@ public Path getSinkArchive(String sinkType) { } public void reloadConnectors(WorkerConfig workerConfig) throws IOException { - connectors = ConnectorUtils - .searchForConnectors(workerConfig.getConnectorsDirectory(), workerConfig.getNarExtractionDirectory()); + TreeMap oldConnectors = connectors; + this.connectors = createConnectors(workerConfig); + closeConnectors(oldConnectors); } + + @Override + public void close() { + closeConnectors(connectors); + } + + private void closeConnectors(TreeMap connectorMap) { + connectorMap.values().forEach(connector -> { + try { + connector.close(); + } catch (Exception e) { + log.warn("Failed to close connector", e); + } + }); + connectorMap.clear(); + } + } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java index 9199d568cad03..5ab7ff7221abb 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.worker; +import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.file.Path; import java.util.List; @@ -25,16 +26,25 @@ import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.functions.FunctionUtils; @Slf4j -public class FunctionsManager { - +public class FunctionsManager implements AutoCloseable { private TreeMap functions; + @VisibleForTesting + public FunctionsManager() { + this.functions = new TreeMap<>(); + } + public FunctionsManager(WorkerConfig workerConfig) throws IOException { - this.functions = FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory()); + this.functions = 
createFunctions(workerConfig); + } + + public void addFunction(String functionType, FunctionArchive functionArchive) { + functions.put(functionType, functionArchive); } public FunctionArchive getFunction(String functionType) { @@ -51,6 +61,32 @@ public List getFunctionDefinitions() { } public void reloadFunctions(WorkerConfig workerConfig) throws IOException { - this.functions = FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory()); + TreeMap oldFunctions = functions; + this.functions = createFunctions(workerConfig); + closeFunctions(oldFunctions); + } + + private static TreeMap createFunctions(WorkerConfig workerConfig) throws IOException { + boolean enableClassloading = workerConfig.getEnableClassloadingOfBuiltinFiles() + || ThreadRuntimeFactory.class.getName().equals(workerConfig.getFunctionRuntimeFactoryClassName()); + return FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory(), + workerConfig.getNarExtractionDirectory(), + enableClassloading); + } + + @Override + public void close() { + closeFunctions(functions); + } + + private void closeFunctions(TreeMap functionMap) { + functionMap.values().forEach(functionArchive -> { + try { + functionArchive.close(); + } catch (Exception e) { + log.warn("Failed to close function archive", e); + } + }); + functionMap.clear(); } } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java index 3b8ddf774d162..f88df1bd24c3f 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java @@ -238,6 +238,22 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { ) private boolean zooKeeperAllowReadOnlyOperations; + @FieldContext( + category = CATEGORY_WORKER, + doc = "Specifies if the function worker should use classloading for validating submissions for built-in " + + "connectors and functions. This is required for validateConnectorConfig to take effect. " + + "Default is false." + ) + private Boolean enableClassloadingOfBuiltinFiles = false; + + @FieldContext( + category = CATEGORY_WORKER, + doc = "Specifies if the function worker should use classloading for validating submissions for external " + + "connectors and functions. This is required for validateConnectorConfig to take effect. " + + "Default is false." + ) + private Boolean enableClassloadingOfExternalFiles = false; + @FieldContext( category = CATEGORY_CONNECTORS, doc = "The path to the location to locate builtin connectors" @@ -250,7 +266,22 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { private String narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR; @FieldContext( category = CATEGORY_CONNECTORS, - doc = "Should we validate connector config during submission" + doc = "Whether to enable referencing connectors directory files by file url in connector (sink/source) " + + "creation. Default is true." + ) + private Boolean enableReferencingConnectorDirectoryFiles = true; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Regex patterns for enabling creation of connectors by referencing packages in matching http/https " + + "urls." 
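ConnectorsManager and FunctionsManager now own the lifecycle of the classloaders behind the built-in archives: reload swaps in a freshly scanned map and then closes the previous generation, and close() releases everything that is still registered. Assuming ConnectorsManager, like FunctionsManager, implements AutoCloseable (its close() is annotated @Override in this hunk), a caller-side sketch with an illustrative connector type:

try (ConnectorsManager connectorsManager = new ConnectorsManager(workerConfig)) {
    Connector connector = connectorsManager.getConnector("kafka");   // illustrative built-in type
    // ... read the connector definition, or its package/classloader ...

    // Reload swaps in a new map first, then closes the old connectors' classloaders.
    connectorsManager.reloadConnectors(workerConfig);
}   // whatever is still registered is closed here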
+ ) + private List additionalEnabledConnectorUrlPatterns = new ArrayList<>(); + @FieldContext( + category = CATEGORY_CONNECTORS, + doc = "Enables extended validation for connector config with fine-grain annotation based validation " + + "during submission. Classloading with either enableClassloadingOfExternalFiles or " + + "enableClassloadingOfBuiltinFiles must be enabled on the worker for this to take effect. " + + "Default is false." ) private Boolean validateConnectorConfig = false; @FieldContext( @@ -263,6 +294,18 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { doc = "The path to the location to locate builtin functions" ) private String functionsDirectory = "./functions"; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Whether to enable referencing functions directory files by file url in functions creation. " + + "Default is true." + ) + private Boolean enableReferencingFunctionsDirectoryFiles = true; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Regex patterns for enabling creation of functions by referencing packages in matching http/https " + + "urls." + ) + private List additionalEnabledFunctionsUrlPatterns = new ArrayList<>(); @FieldContext( category = CATEGORY_FUNC_METADATA_MNG, doc = "The Pulsar topic used for storing function metadata" diff --git a/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml b/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml index b4a4e5926de3f..976abe7c54bb2 100644 --- a/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - \ No newline at end of file + + + + + + + + + + diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProviderTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProviderTest.java index 081e693b6a321..cf294afcf9b78 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProviderTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/auth/KubernetesSecretsTokenAuthProviderTest.java @@ -103,7 +103,7 @@ public void testConfigureAuthDataStatefulSetNoCa() { @Test public void testCacheAuthData() throws ApiException { CoreV1Api coreV1Api = mock(CoreV1Api.class); - doReturn(new V1Secret()).when(coreV1Api).createNamespacedSecret(anyString(), any(), anyString(), anyString(), anyString()); + doReturn(new V1Secret()).when(coreV1Api).createNamespacedSecret(anyString(), any(), anyString(), anyString(), anyString(), anyString()); KubernetesSecretsTokenAuthProvider kubernetesSecretsTokenAuthProvider = new KubernetesSecretsTokenAuthProvider(); kubernetesSecretsTokenAuthProvider.initialize(coreV1Api, null, (fd) -> "default"); Function.FunctionDetails funcDetails = Function.FunctionDetails.newBuilder().setTenant("test-tenant").setNamespace("test-ns").setName("test-func").build(); diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java index a5fc8f231a6b5..48497bf218d40 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java +++ 
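The new WorkerConfig switches gate classloading-based validation: built-in (or external) packages are only classloaded when the corresponding enableClassloading* flag is set, or when the thread runtime is configured, since in-process execution needs real classloaders anyway. The decision both managers apply when scanning their directories, as introduced above (generic parameters restored from context):

boolean enableClassloading = workerConfig.getEnableClassloadingOfBuiltinFiles()
        || ThreadRuntimeFactory.class.getName()
                .equals(workerConfig.getFunctionRuntimeFactoryClassName());

TreeMap<String, Connector> connectors = ConnectorUtils.searchForConnectors(
        workerConfig.getConnectorsDirectory(),
        workerConfig.getNarExtractionDirectory(),
        enableClassloading);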
b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java @@ -468,9 +468,9 @@ public void testDynamicConfigMapLoading() throws Exception { KubernetesRuntimeFactory kubernetesRuntimeFactory = getKuberentesRuntimeFactory(); CoreV1Api coreV1Api = Mockito.mock(CoreV1Api.class); V1ConfigMap v1ConfigMap = new V1ConfigMap(); - Mockito.doReturn(v1ConfigMap).when(coreV1Api).readNamespacedConfigMap(any(), any(), any(), any(), any()); + Mockito.doReturn(v1ConfigMap).when(coreV1Api).readNamespacedConfigMap(any(), any(), any()); KubernetesRuntimeFactory.fetchConfigMap(coreV1Api, changeConfigMap, changeConfigNamespace, kubernetesRuntimeFactory); - Mockito.verify(coreV1Api, Mockito.times(1)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null), eq(true), eq(false)); + Mockito.verify(coreV1Api, Mockito.times(1)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null)); KubernetesRuntimeFactory expected = getKuberentesRuntimeFactory(); assertEquals(kubernetesRuntimeFactory, expected); @@ -479,7 +479,7 @@ public void testDynamicConfigMapLoading() throws Exception { configs.put("imagePullPolicy", "test_imagePullPolicy2"); v1ConfigMap.setData(configs); KubernetesRuntimeFactory.fetchConfigMap(coreV1Api, changeConfigMap, changeConfigNamespace, kubernetesRuntimeFactory); - Mockito.verify(coreV1Api, Mockito.times(2)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null), eq(true), eq(false)); + Mockito.verify(coreV1Api, Mockito.times(2)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null)); assertEquals(kubernetesRuntimeFactory.getPulsarDockerImageName(), "test_dockerImage2"); assertEquals(kubernetesRuntimeFactory.getImagePullPolicy(), "test_imagePullPolicy2"); diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java index 1e8194eed443a..7fa279bc1d253 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java @@ -24,16 +24,32 @@ import com.google.gson.JsonObject; import com.google.protobuf.util.JsonFormat; import io.kubernetes.client.custom.Quantity; +import io.kubernetes.client.openapi.ApiException; +import io.kubernetes.client.openapi.apis.AppsV1Api; +import io.kubernetes.client.openapi.apis.CoreV1Api; import io.kubernetes.client.openapi.models.V1Container; +import io.kubernetes.client.openapi.models.V1PodList; import io.kubernetes.client.openapi.models.V1PodSpec; import io.kubernetes.client.openapi.models.V1PodTemplateSpec; import io.kubernetes.client.openapi.models.V1ResourceRequirements; import io.kubernetes.client.openapi.models.V1Service; import io.kubernetes.client.openapi.models.V1StatefulSet; import io.kubernetes.client.openapi.models.V1Toleration; +import java.lang.reflect.Type; +import java.math.BigDecimal; +import java.net.HttpURLConnection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import okhttp3.Call; +import okhttp3.Response; import org.apache.commons.lang.StringUtils; -import 
org.apache.commons.lang3.SystemUtils; import org.apache.commons.lang3.JavaVersion; +import org.apache.commons.lang3.SystemUtils; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.instance.AuthenticationConfig; import org.apache.pulsar.functions.instance.InstanceConfig; @@ -49,27 +65,26 @@ import org.apache.pulsar.functions.worker.ConnectorsManager; import org.apache.pulsar.functions.worker.FunctionsManager; import org.apache.pulsar.functions.worker.WorkerConfig; +import org.mockito.ArgumentMatcher; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; - -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Consumer; -import java.util.stream.Collectors; - import static org.apache.pulsar.functions.runtime.RuntimeUtils.FUNCTIONS_INSTANCE_CLASSPATH; import static org.apache.pulsar.functions.utils.FunctionCommon.roundDecimal; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; @@ -861,6 +876,63 @@ public void testCustomKubernetesDownloadCommandsWithAuth() throws Exception { assertTrue(containerCommand.contains(expectedDownloadCommand), "Found:" + containerCommand); } + @Test + public void testCustomKubernetesDownloadCommandsWithAuthWithoutAuthSpec() throws Exception { + InstanceConfig config = createJavaInstanceConfig(FunctionDetails.Runtime.JAVA, false); + config.setFunctionDetails(createFunctionDetails(FunctionDetails.Runtime.JAVA, false)); + + factory = createKubernetesRuntimeFactory(null, + 10, 1.0, 1.0, Optional.empty(), null, wconfig -> { + wconfig.setAuthenticationEnabled(true); + }, AuthenticationConfig.builder() + .clientAuthenticationPlugin("com.MyAuth") + .clientAuthenticationParameters("{\"authParam1\": \"authParamValue1\"}") + .build()); + + KubernetesRuntime container = factory.createContainer(config, userJarFile, userJarFile, null, null, 30l); + V1StatefulSet spec = container.createStatefulSet(); + String expectedDownloadCommand = "pulsar-admin --admin-url " + pulsarAdminUrl + + " --auth-plugin com.MyAuth --auth-params {\"authParam1\": \"authParamValue1\"}" + + " functions download " + + "--tenant " + TEST_TENANT + + " --namespace " + TEST_NAMESPACE + + " --name " + TEST_NAME + + " --destination-file " + pulsarRootDir + "/" + userJarFile; + String containerCommand = spec.getSpec().getTemplate().getSpec().getContainers().get(0).getCommand().get(2); + assertTrue(containerCommand.contains(expectedDownloadCommand), "Found:" + containerCommand); + } + + @Test + public void testCustomKubernetesDownloadCommandsWithAuthAndCustomTLSWithoutAuthSpec() throws Exception { + InstanceConfig config = 
createJavaInstanceConfig(FunctionDetails.Runtime.JAVA, false); + config.setFunctionDetails(createFunctionDetails(FunctionDetails.Runtime.JAVA, false)); + + factory = createKubernetesRuntimeFactory(null, + 10, 1.0, 1.0, Optional.empty(), null, wconfig -> { + wconfig.setAuthenticationEnabled(true); + }, AuthenticationConfig.builder() + .clientAuthenticationPlugin("com.MyAuth") + .clientAuthenticationParameters("{\"authParam1\": \"authParamValue1\"}") + .useTls(true) // set to verify it is ignored because pulsar admin does not consider this setting + .tlsHostnameVerificationEnable(true) + .tlsTrustCertsFilePath("/my/ca.pem") + .build()); + + KubernetesRuntime container = factory.createContainer(config, userJarFile, userJarFile, null, null, 30l); + V1StatefulSet spec = container.createStatefulSet(); + String expectedDownloadCommand = "pulsar-admin --admin-url " + pulsarAdminUrl + + " --auth-plugin com.MyAuth --auth-params {\"authParam1\": \"authParamValue1\"}" + + " --tls-enable-hostname-verification" + + " --tls-trust-cert-path /my/ca.pem" + + " functions download " + + "--tenant " + TEST_TENANT + + " --namespace " + TEST_NAMESPACE + + " --name " + TEST_NAME + + " --destination-file " + pulsarRootDir + "/" + userJarFile; + String containerCommand = spec.getSpec().getTemplate().getSpec().getContainers().get(0).getCommand().get(2); + assertTrue(containerCommand.contains(expectedDownloadCommand), "Found:" + containerCommand); + } + InstanceConfig createGolangInstanceConfig() { InstanceConfig config = new InstanceConfig(); @@ -925,7 +997,7 @@ private void verifyGolangInstance(InstanceConfig config) throws Exception { assertEquals(goInstanceConfig.get("disk"), 10000); assertEquals(goInstanceConfig.get("instanceID"), 0); assertEquals(goInstanceConfig.get("cleanupSubscription"), false); - assertEquals(goInstanceConfig.get("port"), 0); + assertEquals(goInstanceConfig.get("port"), 4332); assertEquals(goInstanceConfig.get("subscriptionType"), 0); assertEquals(goInstanceConfig.get("timeoutMs"), 0); assertEquals(goInstanceConfig.get("subscriptionName"), ""); @@ -1233,4 +1305,51 @@ private void assertMetricsPortConfigured(Map functionRuntimeFact .contains("--metrics_port 0")); } } + + @Test + public void testDeleteStatefulSetWithTranslatedKubernetesLabelChars() throws Exception { + InstanceConfig config = createJavaInstanceConfig(FunctionDetails.Runtime.JAVA, false); + config.setFunctionDetails(createFunctionDetails(FunctionDetails.Runtime.JAVA, false, + (fb) -> fb.setTenant("c:tenant").setNamespace("c:ns").setName("c:fn"))); + + CoreV1Api coreApi = mock(CoreV1Api.class); + AppsV1Api appsApi = mock(AppsV1Api.class); + + Call successfulCall = mock(Call.class); + Response okResponse = mock(Response.class); + when(okResponse.code()).thenReturn(HttpURLConnection.HTTP_OK); + when(okResponse.isSuccessful()).thenReturn(true); + when(okResponse.message()).thenReturn(""); + when(successfulCall.execute()).thenReturn(okResponse); + + final String expectedFunctionNamePrefix = String.format("pf-%s-%s-%s", "c-tenant", "c-ns", "c-fn"); + + factory = createKubernetesRuntimeFactory(null, 10, 1.0, 1.0); + factory.setCoreClient(coreApi); + factory.setAppsClient(appsApi); + + ArgumentMatcher hasTranslatedFunctionName = (String t) -> t.startsWith(expectedFunctionNamePrefix); + + when(appsApi.deleteNamespacedStatefulSetCall( + argThat(hasTranslatedFunctionName), + anyString(), isNull(), isNull(), anyInt(), isNull(), anyString(), any(), isNull())).thenReturn(successfulCall); + + ApiException notFoundException = 
mock(ApiException.class); + when(notFoundException.getCode()).thenReturn(HttpURLConnection.HTTP_NOT_FOUND); + when(appsApi.readNamespacedStatefulSet( + argThat(hasTranslatedFunctionName), anyString(), isNull())).thenThrow(notFoundException); + + V1PodList podList = mock(V1PodList.class); + when(podList.getItems()).thenReturn(Collections.emptyList()); + + String expectedLabels = String.format("tenant=%s,namespace=%s,name=%s", "c-tenant", "c-ns", "c-fn"); + + when(coreApi.listNamespacedPod(anyString(), isNull(), isNull(), isNull(), isNull(), + eq(expectedLabels), isNull(), isNull(), isNull(), isNull(), isNull())).thenReturn(podList); + KubernetesRuntime kr = factory.createContainer(config, "/test/code", "code.yml", "/test/transforms", "transform.yml", Long.MIN_VALUE); + kr.deleteStatefulSet(); + + verify(coreApi).listNamespacedPod(anyString(), isNull(), isNull(), isNull(), isNull(), + eq(expectedLabels), isNull(), isNull(), isNull(), isNull(), isNull()); + } } diff --git a/pulsar-functions/secrets/pom.xml b/pulsar-functions/secrets/pom.xml index 08d4e9bf63ab8..3001025a5951e 100644 --- a/pulsar-functions/secrets/pom.xml +++ b/pulsar-functions/secrets/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-secrets Pulsar Functions :: Secrets - io.kubernetes client-java ${kubernetesclient.version} + + + bcpkix-jdk18on + org.bouncycastle + + + bcutil-jdk18on + org.bouncycastle + + + bcprov-jdk18on + org.bouncycastle + + - ${project.groupId} pulsar-functions-proto @@ -62,7 +73,6 @@
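The new KubernetesRuntimeTest case pins down why deleteStatefulSet now builds its pod selector from getLabels(...): tenant/namespace/name values that are illegal as Kubernetes label values (the test uses "c:tenant", "c:ns", "c:fn") are stored on the pods in translated form ("c-tenant", and so on), so the selector must use the translated labels rather than the raw FunctionDetails fields. The selector construction from the runtime hunk earlier in this patch:

Map<String, String> validLabels = getLabels(instanceConfig.getFunctionDetails());
String labelSelector = String.format("tenant=%s,namespace=%s,name=%s",
        validLabels.get("tenant"),
        validLabels.get("namespace"),
        validLabels.get("name"));
// e.g. tenant "c:tenant" was applied to the pods as "c-tenant", so this selector matches them,
// while a selector built from the raw FunctionDetails values would not.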
    - com.github.spotbugs spotbugs-maven-plugin @@ -95,5 +105,4 @@
    -
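Note on the testDeleteStatefulSetWithTranslatedKubernetesLabelChars test shown above: it relies on characters that are not legal in Kubernetes resource names and label values (such as the ':' in "c:tenant") being translated to '-' before the statefulset name and pod label selector are built. The following is a minimal, self-contained sketch of that kind of translation; the class name, the regex, and the lowercasing step are illustrative assumptions, not the actual Pulsar implementation.

import java.util.regex.Pattern;

// Hypothetical helper, for illustration only; Pulsar's real translation logic lives elsewhere.
final class KubernetesNameSanitizer {
    // Roughly: keep lowercase alphanumerics, '.', '_' and '-', and replace everything else with '-'.
    private static final Pattern INVALID_CHARS = Pattern.compile("[^a-z0-9._-]");

    static String sanitize(String raw) {
        return INVALID_CHARS.matcher(raw.toLowerCase()).replaceAll("-");
    }

    public static void main(String[] args) {
        // Prints "pf-c-tenant-c-ns-c-fn", the prefix the test asserts on.
        System.out.println(String.format("pf-%s-%s-%s",
                sanitize("c:tenant"), sanitize("c:ns"), sanitize("c:fn")));
    }
}

Under these assumptions, "c:tenant"/"c:ns"/"c:fn" map to "c-tenant"/"c-ns"/"c-fn", which is why the mocked deleteNamespacedStatefulSetCall and listNamespacedPod stubs in the test match on the translated names rather than the raw ones.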
    diff --git a/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml b/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml index 0cab8ae6a418a..7d4464ae1c08d 100644 --- a/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT - pulsar-functions-utils Pulsar Functions :: Utils - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-proto ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-config-validation ${project.version} - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.core jackson-annotations - com.fasterxml.jackson.core jackson-databind - com.google.code.gson gson - net.jodah typetools - - ${project.groupId} - pulsar-client-original - ${project.version} + net.bytebuddy + byte-buddy + + + org.zeroturnaround + zt-zip + 1.17 + + + ${project.groupId} + pulsar-client-original + ${project.version} - ${project.groupId} pulsar-package-core ${project.version} - + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + @@ -119,7 +119,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -135,5 +134,4 @@ - diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java index 7df173da0f195..6a3d2f6ad7ddb 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java @@ -22,16 +22,9 @@ import com.google.protobuf.AbstractMessage.Builder; import com.google.protobuf.MessageOrBuilder; import com.google.protobuf.util.JsonFormat; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.io.ObjectOutputStream; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.net.MalformedURLException; import java.net.ServerSocket; import java.net.URISyntaxException; import java.net.URL; @@ -41,10 +34,14 @@ import java.util.Collection; import java.util.Map; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import lombok.AccessLevel; import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; -import net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.description.type.TypeList; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.MessageId; @@ -54,16 +51,11 @@ import org.apache.pulsar.client.impl.auth.AuthenticationDataBasic; import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.Utils; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import 
org.apache.pulsar.functions.api.WindowFunction; import org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType; import org.apache.pulsar.functions.proto.Function.FunctionDetails.Runtime; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Sink; import org.apache.pulsar.io.core.Source; @@ -97,50 +89,74 @@ public static int findAvailablePort() { } } - public static Class[] getFunctionTypes(FunctionConfig functionConfig, ClassLoader classLoader) + public static TypeDefinition[] getFunctionTypes(FunctionConfig functionConfig, TypePool typePool) throws ClassNotFoundException { - return getFunctionTypes(functionConfig, classLoader.loadClass(functionConfig.getClassName())); + return getFunctionTypes(functionConfig, typePool.describe(functionConfig.getClassName()).resolve()); } - public static Class[] getFunctionTypes(FunctionConfig functionConfig, Class functionClass) - throws ClassNotFoundException { + public static TypeDefinition[] getFunctionTypes(FunctionConfig functionConfig, TypeDefinition functionClass) { boolean isWindowConfigPresent = functionConfig.getWindowConfig() != null; return getFunctionTypes(functionClass, isWindowConfigPresent); } - public static Class[] getFunctionTypes(Class userClass, boolean isWindowConfigPresent) { + public static TypeDefinition[] getFunctionTypes(TypeDefinition userClass, boolean isWindowConfigPresent) { Class classParent = getFunctionClassParent(userClass, isWindowConfigPresent); - Class[] typeArgs = TypeResolver.resolveRawArguments(classParent, userClass); + TypeList.Generic typeArgsList = resolveInterfaceTypeArguments(userClass, classParent); + TypeDescription.Generic[] typeArgs = new TypeDescription.Generic[2]; + typeArgs[0] = typeArgsList.get(0); + typeArgs[1] = typeArgsList.get(1); // if window function if (isWindowConfigPresent) { if (classParent.equals(java.util.function.Function.class)) { - if (!typeArgs[0].equals(Collection.class)) { + if (!typeArgs[0].asErasure().isAssignableTo(Collection.class)) { throw new IllegalArgumentException("Window function must take a collection as input"); } - typeArgs[0] = (Class) unwrapType(classParent, userClass, 0); + typeArgs[0] = typeArgs[0].getTypeArguments().get(0); } } - if (typeArgs[1].equals(Record.class)) { - typeArgs[1] = (Class) unwrapType(classParent, userClass, 1); + if (typeArgs[1].asErasure().isAssignableTo(Record.class)) { + typeArgs[1] = typeArgs[1].getTypeArguments().get(0); + } + if (typeArgs[1].asErasure().isAssignableTo(CompletableFuture.class)) { + typeArgs[1] = typeArgs[1].getTypeArguments().get(0); } - return typeArgs; } - public static Class[] getRawFunctionTypes(Class userClass, boolean isWindowConfigPresent) { + private static TypeList.Generic resolveInterfaceTypeArguments(TypeDefinition userClass, Class interfaceClass) { + if (!interfaceClass.isInterface()) { + throw new IllegalArgumentException("interfaceClass must be an interface"); + } + for (TypeDescription.Generic interfaze : userClass.getInterfaces()) { + if (interfaze.asErasure().isAssignableTo(interfaceClass)) { + return interfaze.getTypeArguments(); + } + } + if (userClass.getSuperClass() != null) { + return resolveInterfaceTypeArguments(userClass.getSuperClass(), interfaceClass); + } + return null; + } + + public static TypeDescription.Generic[] getRawFunctionTypes(TypeDefinition userClass, + boolean isWindowConfigPresent) { Class classParent = 
getFunctionClassParent(userClass, isWindowConfigPresent); - return TypeResolver.resolveRawArguments(classParent, userClass); + TypeList.Generic typeArgsList = resolveInterfaceTypeArguments(userClass, classParent); + TypeDescription.Generic[] typeArgs = new TypeDescription.Generic[2]; + typeArgs[0] = typeArgsList.get(0); + typeArgs[1] = typeArgsList.get(1); + return typeArgs; } - public static Class getFunctionClassParent(Class userClass, boolean isWindowConfigPresent) { + public static Class getFunctionClassParent(TypeDefinition userClass, boolean isWindowConfigPresent) { if (isWindowConfigPresent) { - if (WindowFunction.class.isAssignableFrom(userClass)) { + if (userClass.asErasure().isAssignableTo(WindowFunction.class)) { return WindowFunction.class; } else { return java.util.function.Function.class; } } else { - if (Function.class.isAssignableFrom(userClass)) { + if (userClass.asErasure().isAssignableTo(Function.class)) { return Function.class; } else { return java.util.function.Function.class; @@ -148,41 +164,6 @@ public static Class getFunctionClassParent(Class userClass, boolean isWind } } - private static Type unwrapType(Class type, Class subType, int position) { - Type genericType = TypeResolver.resolveGenericType(type, subType); - Type argType = ((ParameterizedType) genericType).getActualTypeArguments()[position]; - return ((ParameterizedType) argType).getActualTypeArguments()[0]; - } - - public static Object createInstance(String userClassName, ClassLoader classLoader) { - Class theCls; - try { - theCls = Class.forName(userClassName); - } catch (ClassNotFoundException | NoClassDefFoundError cnfe) { - try { - theCls = Class.forName(userClassName, true, classLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new RuntimeException("User class must be in class path", cnfe); - } - } - Object result; - try { - Constructor meth = theCls.getDeclaredConstructor(); - meth.setAccessible(true); - result = meth.newInstance(); - } catch (InstantiationException ie) { - throw new RuntimeException("User class must be concrete", ie); - } catch (NoSuchMethodException e) { - throw new RuntimeException("User class doesn't have such method", e); - } catch (IllegalAccessException e) { - throw new RuntimeException("User class must have a no-arg constructor", e); - } catch (InvocationTargetException e) { - throw new RuntimeException("User class constructor throws exception", e); - } - return result; - - } - public static Runtime convertRuntime(FunctionConfig.Runtime runtime) { for (Runtime type : Runtime.values()) { if (type.name().equals(runtime.name())) { @@ -223,29 +204,34 @@ public static FunctionConfig.ProcessingGuarantees convertProcessingGuarantee( throw new RuntimeException("Unrecognized processing guarantee: " + processingGuarantees.name()); } - public static Class getSourceType(String className, ClassLoader classLoader) throws ClassNotFoundException { - return getSourceType(classLoader.loadClass(className)); + public static TypeDefinition getSourceType(String className, TypePool typePool) { + return getSourceType(typePool.describe(className).resolve()); } - public static Class getSourceType(Class sourceClass) { - - if (Source.class.isAssignableFrom(sourceClass)) { - return TypeResolver.resolveRawArgument(Source.class, sourceClass); - } else if (BatchSource.class.isAssignableFrom(sourceClass)) { - return TypeResolver.resolveRawArgument(BatchSource.class, sourceClass); + public static TypeDefinition getSourceType(TypeDefinition sourceClass) { + if 
(sourceClass.asErasure().isAssignableTo(Source.class)) { + return resolveInterfaceTypeArguments(sourceClass, Source.class).get(0); + } else if (sourceClass.asErasure().isAssignableTo(BatchSource.class)) { + return resolveInterfaceTypeArguments(sourceClass, BatchSource.class).get(0); } else { throw new IllegalArgumentException( String.format("Source class %s does not implement the correct interface", - sourceClass.getName())); + sourceClass.getActualName())); } } - public static Class getSinkType(String className, ClassLoader classLoader) throws ClassNotFoundException { - return getSinkType(classLoader.loadClass(className)); + public static TypeDefinition getSinkType(String className, TypePool typePool) { + return getSinkType(typePool.describe(className).resolve()); } - public static Class getSinkType(Class sinkClass) { - return TypeResolver.resolveRawArgument(Sink.class, sinkClass); + public static TypeDefinition getSinkType(TypeDefinition sinkClass) { + if (sinkClass.asErasure().isAssignableTo(Sink.class)) { + return resolveInterfaceTypeArguments(sinkClass, Sink.class).get(0); + } else { + throw new IllegalArgumentException( + String.format("Sink class %s does not implement the correct interface", + sinkClass.getActualName())); + } } public static void downloadFromHttpUrl(String destPkgUrl, File targetFile) throws IOException { @@ -264,16 +250,6 @@ public static void downloadFromHttpUrl(String destPkgUrl, File targetFile) throw log.info("Downloading function package from {} to {} completed!", destPkgUrl, targetFile.getAbsoluteFile()); } - public static ClassLoader extractClassLoader(String destPkgUrl) throws IOException, URISyntaxException { - File file = extractFileFromPkgURL(destPkgUrl); - try { - return ClassLoaderUtils.loadJar(file); - } catch (MalformedURLException e) { - throw new IllegalArgumentException( - "Corrupt User PackageFile " + file + " with error " + e.getMessage()); - } - } - public static File createPkgTempFile() throws IOException { return File.createTempFile("functions", ".tmp"); } @@ -297,21 +273,6 @@ public static File extractFileFromPkgURL(String destPkgUrl) throws IOException, } } - public static NarClassLoader extractNarClassLoader(File packageFile, - String narExtractionDirectory) { - if (packageFile != null) { - try { - return NarClassLoaderBuilder.builder() - .narFile(packageFile) - .extractionDirectory(narExtractionDirectory) - .build(); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage()); - } - } - return null; - } - public static String getFullyQualifiedInstanceId(org.apache.pulsar.functions.proto.Function.Instance instance) { return getFullyQualifiedInstanceId( instance.getFunctionMetaData().getFunctionDetails().getTenant(), @@ -345,17 +306,6 @@ public static final MessageId getMessageId(long sequenceId) { return new MessageIdImpl(ledgerId, entryId, -1); } - public static byte[] toByteArray(Object obj) throws IOException { - byte[] bytes = null; - try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ObjectOutputStream oos = new ObjectOutputStream(bos)) { - oos.writeObject(obj); - oos.flush(); - bytes = bos.toByteArray(); - } - return bytes; - } - public static String getUniquePackageName(String packageName) { return String.format("%s-%s", UUID.randomUUID().toString(), packageName); } @@ -403,146 +353,11 @@ private static String extractFromFullyQualifiedName(String fqfn, int index) { throw new RuntimeException("Invalid Fully Qualified Function Name " + fqfn); } - public static Class getTypeArg(String className, Class 
funClass, ClassLoader classLoader) - throws ClassNotFoundException { - Class loadedClass = classLoader.loadClass(className); - if (!funClass.isAssignableFrom(loadedClass)) { - throw new IllegalArgumentException( - String.format("class %s is not type of %s", className, funClass.getName())); - } - return TypeResolver.resolveRawArgument(funClass, loadedClass); - } - public static double roundDecimal(double value, int places) { double scale = Math.pow(10, places); return Math.round(value * scale) / scale; } - public static ClassLoader getClassLoaderFromPackage( - ComponentType componentType, - String className, - File packageFile, - String narExtractionDirectory) { - String connectorClassName = className; - ClassLoader jarClassLoader = null; - boolean keepJarClassLoader = false; - ClassLoader narClassLoader = null; - boolean keepNarClassLoader = false; - - Exception jarClassLoaderException = null; - Exception narClassLoaderException = null; - - try { - try { - jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); - } catch (Exception e) { - jarClassLoaderException = e; - } - try { - narClassLoader = FunctionCommon.extractNarClassLoader(packageFile, narExtractionDirectory); - } catch (Exception e) { - narClassLoaderException = e; - } - - // if connector class name is not provided, we can only try to load archive as a NAR - if (isEmpty(connectorClassName)) { - if (narClassLoader == null) { - throw new IllegalArgumentException(String.format("%s package does not have the correct format. " - + "Pulsar cannot determine if the package is a NAR package or JAR package. " - + "%s classname is not provided and attempts to load it as a NAR package produced " - + "the following error.", - capFirstLetter(componentType), capFirstLetter(componentType)), - narClassLoaderException); - } - try { - if (componentType == ComponentType.FUNCTION) { - connectorClassName = FunctionUtils.getFunctionClass(narClassLoader); - } else if (componentType == ComponentType.SOURCE) { - connectorClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) narClassLoader); - } else { - connectorClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) narClassLoader); - } - } catch (IOException e) { - throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", - componentType.toString().toLowerCase()), e); - } - - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), - connectorClassName), e); - } - - } else { - // if connector class name is provided, we need to try to load it as a JAR and as a NAR. 
- if (jarClassLoader != null) { - try { - jarClassLoader.loadClass(connectorClassName); - keepJarClassLoader = true; - return jarClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - // class not found in JAR try loading as a NAR and searching for the class - if (narClassLoader != null) { - - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); - } - } else { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), - connectorClassName), e); - } - } - } else if (narClassLoader != null) { - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); - } - } else { - StringBuilder errorMsg = new StringBuilder(capFirstLetter(componentType) - + " package does not have the correct format." - + " Pulsar cannot determine if the package is a NAR package or JAR package."); - - if (jarClassLoaderException != null) { - errorMsg.append( - " Attempts to load it as a JAR package produced error: " + jarClassLoaderException - .getMessage()); - } - - if (narClassLoaderException != null) { - errorMsg.append( - " Attempts to load it as a NAR package produced error: " + narClassLoaderException - .getMessage()); - } - - throw new IllegalArgumentException(errorMsg.toString()); - } - } - } finally { - if (!keepJarClassLoader) { - ClassLoaderUtils.closeClassLoader(jarClassLoader); - } - if (!keepNarClassLoader) { - ClassLoaderUtils.closeClassLoader(narClassLoader); - } - } - } - public static String capFirstLetter(Enum en) { return StringUtils.capitalize(en.toString().toLowerCase()); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java index d20d50df1bdd1..a2a1764692e86 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java @@ -18,12 +18,11 @@ */ package org.apache.pulsar.functions.utils; -import static org.apache.commons.lang.StringUtils.isBlank; -import static org.apache.commons.lang.StringUtils.isNotBlank; -import static org.apache.commons.lang.StringUtils.isNotEmpty; +import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isEmpty; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.pulsar.common.functions.Utils.BUILTIN; -import static org.apache.pulsar.common.util.ClassLoaderUtils.loadJar; import static org.apache.pulsar.functions.utils.FunctionCommon.convertFromCompressionType; import static org.apache.pulsar.functions.utils.FunctionCommon.convertFromFunctionDetailsCompressionType; import static org.apache.pulsar.functions.utils.FunctionCommon.convertFromFunctionDetailsSubscriptionPosition; @@ -32,9 +31,7 @@ import com.google.gson.Gson; import 
com.google.gson.reflect.TypeToken; import java.io.File; -import java.io.IOException; import java.lang.reflect.Type; -import java.net.MalformedURLException; import java.util.Collection; import java.util.HashMap; import java.util.LinkedList; @@ -44,10 +41,13 @@ import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.ProducerConfig; import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.functions.WindowConfig; @@ -55,7 +55,6 @@ import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; @Slf4j public class FunctionConfigUtils { @@ -74,26 +73,21 @@ public static class ExtractedFunctionDetails { private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.create(); - public static FunctionDetails convert(FunctionConfig functionConfig, ClassLoader classLoader) - throws IllegalArgumentException { + public static FunctionDetails convert(FunctionConfig functionConfig) { + return convert(functionConfig, (ValidatableFunctionPackage) null); + } - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { - if (classLoader != null) { - try { - Class[] typeArgs = FunctionCommon.getFunctionTypes(functionConfig, classLoader); - return convert( - functionConfig, - new ExtractedFunctionDetails( - functionConfig.getClassName(), - typeArgs[0].getName(), - typeArgs[1].getName())); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); - } - } + public static FunctionDetails convert(FunctionConfig functionConfig, + ValidatableFunctionPackage validatableFunctionPackage) + throws IllegalArgumentException { + if (functionConfig == null) { + throw new IllegalArgumentException("Function config is not provided"); + } + if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA && validatableFunctionPackage != null) { + return convert(functionConfig, doJavaChecks(functionConfig, validatableFunctionPackage)); + } else { + return convert(functionConfig, new ExtractedFunctionDetails(functionConfig.getClassName(), null, null)); } - return convert(functionConfig, new ExtractedFunctionDetails(functionConfig.getClassName(), null, null)); } public static FunctionDetails convert(FunctionConfig functionConfig, ExtractedFunctionDetails extractedDetails) @@ -280,6 +274,12 @@ public static FunctionDetails convert(FunctionConfig functionConfig, ExtractedFu } sinkSpecBuilder.setProducerSpec(pbldr.build()); } + if (functionConfig.getBatchBuilder() != null) { + Function.ProducerSpec.Builder builder = sinkSpecBuilder.getProducerSpec() != null + ? 
sinkSpecBuilder.getProducerSpec().toBuilder() + : Function.ProducerSpec.newBuilder(); + sinkSpecBuilder.setProducerSpec(builder.setBatchBuilder(functionConfig.getBatchBuilder()).build()); + } functionDetailsBuilder.setSink(sinkSpecBuilder); if (functionConfig.getTenant() != null) { @@ -587,48 +587,49 @@ public static void inferMissingArguments(FunctionConfig functionConfig, } } - public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfig, ClassLoader clsLoader) { + public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfig, + ValidatableFunctionPackage validatableFunctionPackage) { - String functionClassName = functionConfig.getClassName(); - Class functionClass; + String functionClassName = StringUtils.trimToNull(functionConfig.getClassName()); + TypeDefinition functionClass; try { // if class name in function config is not set, this should be a built-in function // thus we should try to find its class name in the NAR service definition if (functionClassName == null) { - try { - functionClassName = FunctionUtils.getFunctionClass(clsLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract source class from archive", e); + FunctionDefinition functionDefinition = + validatableFunctionPackage.getFunctionMetaData(FunctionDefinition.class); + if (functionDefinition == null) { + throw new IllegalArgumentException("Function class name is not provided."); + } + functionClassName = functionDefinition.getFunctionClass(); + if (functionClassName == null) { + throw new IllegalArgumentException("Function class name is not provided."); } } - functionClass = clsLoader.loadClass(functionClassName); + functionClass = validatableFunctionPackage.resolveType(functionClassName); - if (!org.apache.pulsar.functions.api.Function.class.isAssignableFrom(functionClass) - && !java.util.function.Function.class.isAssignableFrom(functionClass) - && !org.apache.pulsar.functions.api.WindowFunction.class.isAssignableFrom(functionClass)) { + if (!functionClass.asErasure().isAssignableTo(org.apache.pulsar.functions.api.Function.class) + && !functionClass.asErasure().isAssignableTo(java.util.function.Function.class) + && !functionClass.asErasure() + .isAssignableTo(org.apache.pulsar.functions.api.WindowFunction.class)) { throw new IllegalArgumentException( String.format("Function class %s does not implement the correct interface", - functionClass.getName())); + functionClassName)); } - } catch (ClassNotFoundException | NoClassDefFoundError e) { + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); + String.format("Function class %s must be in class path", functionClassName), e); } - Class[] typeArgs; - try { - typeArgs = FunctionCommon.getFunctionTypes(functionConfig, functionClass); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); - } + TypeDefinition[] typeArgs = FunctionCommon.getFunctionTypes(functionConfig, functionClass); // inputs use default schema, so there is no check needed there // Check if the Input serialization/deserialization class exists in jar or already loaded and that it // implements SerDe class if (functionConfig.getCustomSerdeInputs() != null) { functionConfig.getCustomSerdeInputs().forEach((topicName, inputSerializer) -> { - 
ValidatorUtils.validateSerde(inputSerializer, typeArgs[0], clsLoader, true); + ValidatorUtils.validateSerde(inputSerializer, typeArgs[0], validatableFunctionPackage.getTypePool(), + true); }); } @@ -643,8 +644,8 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi throw new IllegalArgumentException( String.format("Topic %s has an incorrect schema Info", topicName)); } - ValidatorUtils.validateSchema(consumerConfig.getSchemaType(), typeArgs[0], clsLoader, true); - + ValidatorUtils.validateSchema(consumerConfig.getSchemaType(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); }); } @@ -659,13 +660,16 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi "Only one of schemaType or serdeClassName should be set in inputSpec"); } if (!isEmpty(conf.getSerdeClassName())) { - ValidatorUtils.validateSerde(conf.getSerdeClassName(), typeArgs[0], clsLoader, true); + ValidatorUtils.validateSerde(conf.getSerdeClassName(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); } if (!isEmpty(conf.getSchemaType())) { - ValidatorUtils.validateSchema(conf.getSchemaType(), typeArgs[0], clsLoader, true); + ValidatorUtils.validateSchema(conf.getSchemaType(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); } if (conf.getCryptoConfig() != null) { - ValidatorUtils.validateCryptoKeyReader(conf.getCryptoConfig(), clsLoader, false); + ValidatorUtils.validateCryptoKeyReader(conf.getCryptoConfig(), + validatableFunctionPackage.getTypePool(), false); } }); } @@ -673,8 +677,8 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi if (Void.class.equals(typeArgs[1])) { return new FunctionConfigUtils.ExtractedFunctionDetails( functionClassName, - typeArgs[0].getName(), - typeArgs[1].getName()); + typeArgs[0].asErasure().getTypeName(), + typeArgs[1].asErasure().getTypeName()); } // One and only one of outputSchemaType and outputSerdeClassName should be set @@ -684,22 +688,25 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi } if (!isEmpty(functionConfig.getOutputSchemaType())) { - ValidatorUtils.validateSchema(functionConfig.getOutputSchemaType(), typeArgs[1], clsLoader, false); + ValidatorUtils.validateSchema(functionConfig.getOutputSchemaType(), typeArgs[1], + validatableFunctionPackage.getTypePool(), false); } if (!isEmpty(functionConfig.getOutputSerdeClassName())) { - ValidatorUtils.validateSerde(functionConfig.getOutputSerdeClassName(), typeArgs[1], clsLoader, false); + ValidatorUtils.validateSerde(functionConfig.getOutputSerdeClassName(), typeArgs[1], + validatableFunctionPackage.getTypePool(), false); } if (functionConfig.getProducerConfig() != null && functionConfig.getProducerConfig().getCryptoConfig() != null) { ValidatorUtils - .validateCryptoKeyReader(functionConfig.getProducerConfig().getCryptoConfig(), clsLoader, true); + .validateCryptoKeyReader(functionConfig.getProducerConfig().getCryptoConfig(), + validatableFunctionPackage.getTypePool(), true); } return new FunctionConfigUtils.ExtractedFunctionDetails( functionClassName, - typeArgs[0].getName(), - typeArgs[1].getName()); + typeArgs[0].asErasure().getTypeName(), + typeArgs[1].asErasure().getTypeName()); } private static void doPythonChecks(FunctionConfig functionConfig) { @@ -748,7 +755,7 @@ private static void verifyNoTopicClash(Collection inputTopics, String ou } } - private static void doCommonChecks(FunctionConfig functionConfig) { + public static void doCommonChecks(FunctionConfig 
functionConfig) { if (isEmpty(functionConfig.getTenant())) { throw new IllegalArgumentException("Function tenant cannot be null"); } @@ -890,7 +897,7 @@ private static void doCommonChecks(FunctionConfig functionConfig) { } } - private static Collection collectAllInputTopics(FunctionConfig functionConfig) { + public static Collection collectAllInputTopics(FunctionConfig functionConfig) { List retval = new LinkedList<>(); if (functionConfig.getInputs() != null) { retval.addAll(functionConfig.getInputs()); @@ -910,47 +917,21 @@ private static Collection collectAllInputTopics(FunctionConfig functionC return retval; } - public static ClassLoader validate(FunctionConfig functionConfig, File functionPackageFile) { + public static void validateNonJavaFunction(FunctionConfig functionConfig) { doCommonChecks(functionConfig); - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { - ClassLoader classLoader; - if (functionPackageFile != null) { - try { - classLoader = loadJar(functionPackageFile); - } catch (MalformedURLException e) { - throw new IllegalArgumentException("Corrupted Jar File", e); - } - } else if (!isEmpty(functionConfig.getJar())) { - File jarFile = new File(functionConfig.getJar()); - if (!jarFile.exists()) { - throw new IllegalArgumentException("Jar file does not exist"); - } - try { - classLoader = loadJar(jarFile); - } catch (Exception e) { - throw new IllegalArgumentException("Corrupted Jar File", e); - } - } else { - throw new IllegalArgumentException("Function Package is not provided"); - } - - doJavaChecks(functionConfig, classLoader); - return classLoader; - } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { + if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { doGolangChecks(functionConfig); - return null; } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.PYTHON) { doPythonChecks(functionConfig); - return null; } else { throw new IllegalArgumentException("Function language runtime is either not set or cannot be determined"); } } public static ExtractedFunctionDetails validateJavaFunction(FunctionConfig functionConfig, - ClassLoader classLoader) { + ValidatableFunctionPackage validatableFunctionPackage) { doCommonChecks(functionConfig); - return doJavaChecks(functionConfig, classLoader); + return doJavaChecks(functionConfig, validatableFunctionPackage); } public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) { @@ -1092,6 +1073,9 @@ public static FunctionConfig validateUpdate(FunctionConfig existingConfig, Funct if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } + if (newConfig.getProducerConfig() != null) { + mergedConfig.setProducerConfig(newConfig.getProducerConfig()); + } return mergedConfig; } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java new file mode 100644 index 0000000000000..8224de32521fb --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.utils; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.MalformedURLException; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.List; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.nar.NarClassLoaderBuilder; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.zeroturnaround.zip.ZipUtil; + +/** + * FunctionFilePackage is a class that represents a function package and + * implements the ValidatableFunctionPackage interface which decouples the + * function package from classloading. + */ +public class FunctionFilePackage implements AutoCloseable, ValidatableFunctionPackage { + private final File file; + private final ClassFileLocator.Compound classFileLocator; + private final TypePool typePool; + private final boolean isNar; + private final String narExtractionDirectory; + private final boolean enableClassloading; + + private ClassLoader classLoader; + + private final Object configMetadata; + + public FunctionFilePackage(File file, String narExtractionDirectory, boolean enableClassloading, + Class configClass) { + this.file = file; + boolean nonZeroFile = file.isFile() && file.length() > 0; + this.isNar = nonZeroFile ? ZipUtil.containsAnyEntry(file, + new String[] {"META-INF/services/pulsar-io.yaml", "META-INF/bundled-dependencies"}) : false; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + if (isNar) { + List classpathFromArchive = null; + try { + classpathFromArchive = NarClassLoader.getClasspathFromArchive(file, narExtractionDirectory); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + List classFileLocators = new ArrayList<>(); + classFileLocators.add(ClassFileLocator.ForClassLoader.ofSystemLoader()); + for (File classpath : classpathFromArchive) { + if (classpath.exists()) { + try { + ClassFileLocator locator; + if (classpath.isDirectory()) { + locator = new ClassFileLocator.ForFolder(classpath); + } else { + locator = ClassFileLocator.ForJarFile.of(classpath); + } + classFileLocators.add(locator); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + this.classFileLocator = new ClassFileLocator.Compound(classFileLocators); + this.typePool = TypePool.Default.of(classFileLocator); + try { + this.configMetadata = FunctionUtils.getPulsarIOServiceConfig(file, configClass); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + try { + this.classFileLocator = nonZeroFile + ? 
new ClassFileLocator.Compound(ClassFileLocator.ForClassLoader.ofSystemLoader(), + ClassFileLocator.ForJarFile.of(file)) : + new ClassFileLocator.Compound(ClassFileLocator.ForClassLoader.ofSystemLoader()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + this.typePool = + TypePool.Default.of(classFileLocator); + this.configMetadata = null; + } + } + + public TypeDescription resolveType(String className) { + return typePool.describe(className).resolve(); + } + + public boolean isNar() { + return isNar; + } + + public File getFile() { + return file; + } + + public TypePool getTypePool() { + return typePool; + } + + @Override + public T getFunctionMetaData(Class clazz) { + return configMetadata != null ? clazz.cast(configMetadata) : null; + } + + @Override + public synchronized void close() throws IOException { + classFileLocator.close(); + if (classLoader instanceof Closeable) { + ((Closeable) classLoader).close(); + } + } + + public boolean isEnableClassloading() { + return enableClassloading; + } + + public synchronized ClassLoader getClassLoader() { + if (classLoader == null) { + classLoader = createClassLoader(); + } + return classLoader; + } + + private ClassLoader createClassLoader() { + if (enableClassloading) { + if (isNar) { + try { + return NarClassLoaderBuilder.builder() + .narFile(file) + .extractionDirectory(narExtractionDirectory) + .build(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + try { + return new URLClassLoader(new java.net.URL[] {file.toURI().toURL()}, + NarClassLoader.class.getClassLoader()); + } catch (MalformedURLException e) { + throw new UncheckedIOException(e); + } + } + } else { + throw new IllegalStateException("Classloading is not enabled"); + } + } + + @Override + public String toString() { + return "FunctionFilePackage{" + + "file=" + file + + ", isNar=" + isNar + + '}'; + } +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java new file mode 100644 index 0000000000000..ed17478dd00ed --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.utils; + +import static org.apache.commons.lang3.StringUtils.isEmpty; +import java.io.File; +import java.io.IOException; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.nar.NarClassLoaderBuilder; +import org.apache.pulsar.common.util.ClassLoaderUtils; +import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.ConnectorUtils; + +public class FunctionRuntimeCommon { + public static NarClassLoader extractNarClassLoader(File packageFile, + String narExtractionDirectory) { + if (packageFile != null) { + try { + return NarClassLoaderBuilder.builder() + .narFile(packageFile) + .extractionDirectory(narExtractionDirectory) + .build(); + } catch (IOException e) { + throw new IllegalArgumentException(e.getMessage()); + } + } + return null; + } + + public static ClassLoader getClassLoaderFromPackage( + Function.FunctionDetails.ComponentType componentType, + String className, + File packageFile, + String narExtractionDirectory) { + String connectorClassName = className; + ClassLoader jarClassLoader = null; + boolean keepJarClassLoader = false; + NarClassLoader narClassLoader = null; + boolean keepNarClassLoader = false; + + Exception jarClassLoaderException = null; + Exception narClassLoaderException = null; + + try { + try { + jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); + } catch (Exception e) { + jarClassLoaderException = e; + } + try { + narClassLoader = extractNarClassLoader(packageFile, narExtractionDirectory); + } catch (Exception e) { + narClassLoaderException = e; + } + + // if connector class name is not provided, we can only try to load archive as a NAR + if (isEmpty(connectorClassName)) { + if (narClassLoader == null) { + throw new IllegalArgumentException(String.format("%s package does not have the correct format. " + + "Pulsar cannot determine if the package is a NAR package or JAR package. " + + "%s classname is not provided and attempts to load it as a NAR package produced " + + "the following error.", + FunctionCommon.capFirstLetter(componentType), FunctionCommon.capFirstLetter(componentType)), + narClassLoaderException); + } + try { + if (componentType == Function.FunctionDetails.ComponentType.FUNCTION) { + connectorClassName = FunctionUtils.getFunctionClass(narClassLoader); + } else if (componentType == Function.FunctionDetails.ComponentType.SOURCE) { + connectorClassName = ConnectorUtils.getIOSourceClass(narClassLoader); + } else { + connectorClassName = ConnectorUtils.getIOSinkClass(narClassLoader); + } + } catch (IOException e) { + throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", + componentType.toString().toLowerCase()), e); + } + + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e) { + throw new IllegalArgumentException(String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e); + } + + } else { + // if connector class name is provided, we need to try to load it as a JAR and as a NAR. 
+ if (jarClassLoader != null) { + try { + jarClassLoader.loadClass(connectorClassName); + keepJarClassLoader = true; + return jarClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e) { + // class not found in JAR try loading as a NAR and searching for the class + if (narClassLoader != null) { + + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e1); + } + } else { + throw new IllegalArgumentException(String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e); + } + } + } else if (narClassLoader != null) { + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e1); + } + } else { + StringBuilder errorMsg = new StringBuilder(FunctionCommon.capFirstLetter(componentType) + + " package does not have the correct format." + + " Pulsar cannot determine if the package is a NAR package or JAR package."); + + if (jarClassLoaderException != null) { + errorMsg.append( + " Attempts to load it as a JAR package produced error: " + jarClassLoaderException + .getMessage()); + } + + if (narClassLoaderException != null) { + errorMsg.append( + " Attempts to load it as a NAR package produced error: " + narClassLoaderException + .getMessage()); + } + + throw new IllegalArgumentException(errorMsg.toString()); + } + } + } finally { + if (!keepJarClassLoader) { + ClassLoaderUtils.closeClassLoader(jarClassLoader); + } + if (!keepNarClassLoader) { + ClassLoaderUtils.closeClassLoader(narClassLoader); + } + } + } + +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java new file mode 100644 index 0000000000000..e27ed0eca1973 --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.utils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; + +/** + * LoadedFunctionPackage is a class that represents a function package and + * implements the ValidatableFunctionPackage interface which decouples the + * function package from classloading. This implementation is backed by + * a ClassLoader, and it is used when the function package is already loaded + * by a ClassLoader. This is the case in the LocalRunner and in some of + * the unit tests. + */ +public class LoadedFunctionPackage implements ValidatableFunctionPackage { + private final ClassLoader classLoader; + private final Object configMetadata; + private final TypePool typePool; + + public LoadedFunctionPackage(ClassLoader classLoader, Class configMetadataClass, T configMetadata) { + this.classLoader = classLoader; + this.configMetadata = configMetadata; + typePool = TypePool.Default.of( + ClassFileLocator.ForClassLoader.of(classLoader)); + } + + public LoadedFunctionPackage(ClassLoader classLoader, Class configMetadataClass) { + this.classLoader = classLoader; + if (classLoader instanceof NarClassLoader) { + try { + configMetadata = FunctionUtils.getPulsarIOServiceConfig((NarClassLoader) classLoader, + configMetadataClass); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + configMetadata = null; + } + typePool = TypePool.Default.of( + ClassFileLocator.ForClassLoader.of(classLoader)); + } + + @Override + public TypeDescription resolveType(String className) { + return typePool.describe(className).resolve(); + } + + @Override + public TypePool getTypePool() { + return typePool; + } + + @Override + public T getFunctionMetaData(Class clazz) { + return configMetadata != null ? 
clazz.cast(configMetadata) : null; + } + + @Override + public boolean isEnableClassloading() { + return true; + } + + @Override + public ClassLoader getClassLoader() { + return classLoader; + } +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java index d79f787588c95..a66751b4e634a 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java @@ -41,23 +41,23 @@ import lombok.Setter; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang.StringUtils; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.config.validation.ConfigValidation; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; @Slf4j public class SinkConfigUtils { @@ -216,6 +216,13 @@ public static FunctionDetails convert(SinkConfig sinkConfig, ExtractedSinkDetail functionDetailsBuilder.setSource(sourceSpecBuilder); + if (sinkConfig.getRetainKeyOrdering() != null) { + functionDetailsBuilder.setRetainKeyOrdering(sinkConfig.getRetainKeyOrdering()); + } + if (sinkConfig.getRetainOrdering() != null) { + functionDetailsBuilder.setRetainOrdering(sinkConfig.getRetainOrdering()); + } + if (sinkConfig.getMaxMessageRetries() != null && sinkConfig.getMaxMessageRetries() > 0) { Function.RetryDetails.Builder retryDetails = Function.RetryDetails.newBuilder(); retryDetails.setMaxMessageRetries(sinkConfig.getMaxMessageRetries()); @@ -402,8 +409,8 @@ public static SinkConfig convertFromDetails(FunctionDetails functionDetails) { } public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConfig, - ClassLoader sinkClassLoader, - ClassLoader functionClassLoader, + ValidatableFunctionPackage sinkFunction, + ValidatableFunctionPackage transformFunction, boolean validateConnectorConfig) { if (isEmpty(sinkConfig.getTenant())) { throw new IllegalArgumentException("Sink tenant cannot be null"); @@ -443,63 +450,72 @@ public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConf // if class name in sink config is not set, this should be a built-in sink // thus we should try to find it class name in the NAR service definition if (sinkClassName == null) { - try { - sinkClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) sinkClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract sink class from archive", e); + ConnectorDefinition 
connectorDefinition = sinkFunction.getFunctionMetaData(ConnectorDefinition.class); + if (connectorDefinition == null) { + throw new IllegalArgumentException( + "Sink package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + sinkClassName = connectorDefinition.getSinkClass(); + if (sinkClassName == null) { + throw new IllegalArgumentException("Failed to extract sink class from archive"); } } // check if sink implements the correct interfaces - Class sinkClass; + TypeDefinition sinkClass; try { - sinkClass = sinkClassLoader.loadClass(sinkClassName); - } catch (ClassNotFoundException e) { + sinkClass = sinkFunction.resolveType(sinkClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Sink class %s not found in class loader", sinkClassName), e); + String.format("Sink class %s not found", sinkClassName), e); } String functionClassName = sinkConfig.getTransformFunctionClassName(); - Class typeArg; - ClassLoader inputClassLoader; - if (functionClassLoader != null) { + TypeDefinition typeArg; + ValidatableFunctionPackage inputFunction; + if (transformFunction != null) { // if function class name in sink config is not set, this should be a built-in function // thus we should try to find it class name in the NAR service definition if (functionClassName == null) { - try { - functionClassName = FunctionUtils.getFunctionClass(functionClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract function class from archive", e); + FunctionDefinition functionDefinition = + transformFunction.getFunctionMetaData(FunctionDefinition.class); + if (functionDefinition == null) { + throw new IllegalArgumentException( + "Function package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + functionClassName = functionDefinition.getFunctionClass(); + if (functionClassName == null) { + throw new IllegalArgumentException("Transform function class name must be set"); } } - Class functionClass; + TypeDefinition functionClass; try { - functionClass = functionClassLoader.loadClass(functionClassName); - } catch (ClassNotFoundException e) { + functionClass = transformFunction.resolveType(functionClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Function class %s not found in class loader", functionClassName), e); + String.format("Function class %s not found", functionClassName), e); } // extract type from transform function class - if (!getRawFunctionTypes(functionClass, false)[1].equals(Record.class)) { + if (!getRawFunctionTypes(functionClass, false)[1].asErasure().isAssignableTo(Record.class)) { throw new IllegalArgumentException("Sink transform function output must be of type Record"); } typeArg = getFunctionTypes(functionClass, false)[0]; - inputClassLoader = functionClassLoader; + inputFunction = transformFunction; } else { // extract type from sink class typeArg = getSinkType(sinkClass); - inputClassLoader = sinkClassLoader; + inputFunction = sinkFunction; } if (sinkConfig.getTopicToSerdeClassName() != null) { for (String serdeClassName : sinkConfig.getTopicToSerdeClassName().values()) { - ValidatorUtils.validateSerde(serdeClassName, typeArg, inputClassLoader, true); + ValidatorUtils.validateSerde(serdeClassName, typeArg, inputFunction.getTypePool(), true); } } if (sinkConfig.getTopicToSchemaType() != null) { for (String schemaType : sinkConfig.getTopicToSchemaType().values()) { - 
ValidatorUtils.validateSchema(schemaType, typeArg, inputClassLoader, true); + ValidatorUtils.validateSchema(schemaType, typeArg, inputFunction.getTypePool(), true); } } @@ -512,26 +528,46 @@ public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConf throw new IllegalArgumentException("Only one of serdeClassName or schemaType should be set"); } if (!isEmpty(consumerSpec.getSerdeClassName())) { - ValidatorUtils.validateSerde(consumerSpec.getSerdeClassName(), typeArg, inputClassLoader, true); + ValidatorUtils.validateSerde(consumerSpec.getSerdeClassName(), typeArg, + inputFunction.getTypePool(), true); } if (!isEmpty(consumerSpec.getSchemaType())) { - ValidatorUtils.validateSchema(consumerSpec.getSchemaType(), typeArg, inputClassLoader, true); + ValidatorUtils.validateSchema(consumerSpec.getSchemaType(), typeArg, + inputFunction.getTypePool(), true); } if (consumerSpec.getCryptoConfig() != null) { - ValidatorUtils.validateCryptoKeyReader(consumerSpec.getCryptoConfig(), inputClassLoader, false); + ValidatorUtils.validateCryptoKeyReader(consumerSpec.getCryptoConfig(), + inputFunction.getTypePool(), false); } } } - // validate user defined config if enabled and sink is loaded from NAR - if (validateConnectorConfig && sinkClassLoader instanceof NarClassLoader) { - validateSinkConfig(sinkConfig, (NarClassLoader) sinkClassLoader); + if (sinkConfig.getRetainKeyOrdering() != null + && sinkConfig.getRetainKeyOrdering() + && sinkConfig.getProcessingGuarantees() != null + && sinkConfig.getProcessingGuarantees() == FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) { + throw new IllegalArgumentException( + "When effectively once processing guarantee is specified, retain Key ordering cannot be set"); + } + + if (sinkConfig.getRetainKeyOrdering() != null && sinkConfig.getRetainKeyOrdering() + && sinkConfig.getRetainOrdering() != null && sinkConfig.getRetainOrdering()) { + throw new IllegalArgumentException("Only one of retain ordering or retain key ordering can be set"); + } + + // validate user defined config if enabled and classloading is enabled + if (validateConnectorConfig) { + if (sinkFunction.isEnableClassloading()) { + validateSinkConfig(sinkConfig, sinkFunction); + } else { + log.warn("Skipping annotation based validation of sink config as classloading is disabled"); + } } - return new ExtractedSinkDetails(sinkClassName, typeArg.getName(), functionClassName); + return new ExtractedSinkDetails(sinkClassName, typeArg.asErasure().getTypeName(), functionClassName); } - private static Collection collectAllInputTopics(SinkConfig sinkConfig) { + public static Collection collectAllInputTopics(SinkConfig sinkConfig) { List retval = new LinkedList<>(); if (sinkConfig.getInputs() != null) { retval.addAll(sinkConfig.getInputs()); @@ -687,29 +723,13 @@ public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig ne return mergedConfig; } - public static void validateSinkConfig(SinkConfig sinkConfig, NarClassLoader narClassLoader) { - - if (sinkConfig.getRetainKeyOrdering() != null - && sinkConfig.getRetainKeyOrdering() - && sinkConfig.getProcessingGuarantees() != null - && sinkConfig.getProcessingGuarantees() == FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) { - throw new IllegalArgumentException( - "When effectively once processing guarantee is specified, retain Key ordering cannot be set"); - } - - if (sinkConfig.getRetainKeyOrdering() != null && sinkConfig.getRetainKeyOrdering() - && sinkConfig.getRetainOrdering() != null && sinkConfig.getRetainOrdering()) { 
- throw new IllegalArgumentException("Only one of retain ordering or retain key ordering can be set"); - } - + public static void validateSinkConfig(SinkConfig sinkConfig, ValidatableFunctionPackage sinkFunction) { try { - ConnectorDefinition defn = ConnectorUtils.getConnectorDefinition(narClassLoader); - if (defn.getSinkConfigClass() != null) { - Class configClass = Class.forName(defn.getSinkConfigClass(), true, narClassLoader); + ConnectorDefinition defn = sinkFunction.getFunctionMetaData(ConnectorDefinition.class); + if (defn != null && defn.getSinkConfigClass() != null) { + Class configClass = Class.forName(defn.getSinkConfigClass(), true, sinkFunction.getClassLoader()); validateSinkConfig(sinkConfig, configClass); } - } catch (IOException e) { - throw new IllegalArgumentException("Error validating sink config", e); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not find sink config class", e); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java index ec0c5c444ccba..a6430bbea4585 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java @@ -35,7 +35,9 @@ import lombok.Setter; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; -import net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.ProducerConfig; import org.apache.pulsar.common.functions.Resources; @@ -44,13 +46,11 @@ import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.config.validation.ConfigValidation; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Source; @@ -294,7 +294,7 @@ public static SourceConfig convertFromDetails(FunctionDetails functionDetails) { } public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sourceConfig, - ClassLoader sourceClassLoader, + ValidatableFunctionPackage sourceFunction, boolean validateConnectorConfig) { if (isEmpty(sourceConfig.getTenant())) { throw new IllegalArgumentException("Source tenant cannot be null"); @@ -319,29 +319,34 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour // if class name in source config is not set, this should be a built-in source // thus we should try to find it class name in the NAR service definition if (sourceClassName == null) { - try { - sourceClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) sourceClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract source class from archive", e); + ConnectorDefinition connectorDefinition = sourceFunction.getFunctionMetaData(ConnectorDefinition.class); + if 
(connectorDefinition == null) { + throw new IllegalArgumentException( + "Source package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + sourceClassName = connectorDefinition.getSourceClass(); + if (sourceClassName == null) { + throw new IllegalArgumentException("Failed to extract source class from archive"); } } // check if source implements the correct interfaces - Class sourceClass; + TypeDescription sourceClass; try { - sourceClass = sourceClassLoader.loadClass(sourceClassName); - } catch (ClassNotFoundException e) { + sourceClass = sourceFunction.resolveType(sourceClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( String.format("Source class %s not found in class loader", sourceClassName), e); } - if (!Source.class.isAssignableFrom(sourceClass) && !BatchSource.class.isAssignableFrom(sourceClass)) { + if (!(sourceClass.asErasure().isAssignableTo(Source.class) || sourceClass.asErasure() + .isAssignableTo(BatchSource.class))) { throw new IllegalArgumentException( - String.format("Source class %s does not implement the correct interface", - sourceClass.getName())); + String.format("Source class %s does not implement the correct interface", + sourceClass.getName())); } - if (BatchSource.class.isAssignableFrom(sourceClass)) { + if (sourceClass.asErasure().isAssignableTo(BatchSource.class)) { if (sourceConfig.getBatchSourceConfig() != null) { validateBatchSourceConfig(sourceConfig.getBatchSourceConfig()); } else { @@ -352,7 +357,14 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour } // extract type from source class - Class typeArg = getSourceType(sourceClass); + TypeDefinition typeArg; + + try { + typeArg = getSourceType(sourceClass); + } catch (Exception e) { + throw new IllegalArgumentException( + String.format("Failed to resolve type for Source class %s", sourceClassName), e); + } // Only one of serdeClassName or schemaType should be set if (!StringUtils.isEmpty(sourceConfig.getSerdeClassName()) && !StringUtils @@ -361,29 +373,30 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour } if (!StringUtils.isEmpty(sourceConfig.getSerdeClassName())) { - ValidatorUtils.validateSerde(sourceConfig.getSerdeClassName(), typeArg, sourceClassLoader, false); + ValidatorUtils.validateSerde(sourceConfig.getSerdeClassName(), typeArg, sourceFunction.getTypePool(), + false); } if (!StringUtils.isEmpty(sourceConfig.getSchemaType())) { - ValidatorUtils.validateSchema(sourceConfig.getSchemaType(), typeArg, sourceClassLoader, false); + ValidatorUtils.validateSchema(sourceConfig.getSchemaType(), typeArg, sourceFunction.getTypePool(), + false); } if (sourceConfig.getProducerConfig() != null && sourceConfig.getProducerConfig().getCryptoConfig() != null) { ValidatorUtils - .validateCryptoKeyReader(sourceConfig.getProducerConfig().getCryptoConfig(), sourceClassLoader, - true); - } - - if (typeArg.equals(TypeResolver.Unknown.class)) { - throw new IllegalArgumentException( - String.format("Failed to resolve type for Source class %s", sourceClassName)); + .validateCryptoKeyReader(sourceConfig.getProducerConfig().getCryptoConfig(), + sourceFunction.getTypePool(), true); } - // validate user defined config if enabled and source is loaded from NAR - if (validateConnectorConfig && sourceClassLoader instanceof NarClassLoader) { - validateSourceConfig(sourceConfig, (NarClassLoader) sourceClassLoader); + // validate user defined config if enabled and classloading is enabled + if 
(validateConnectorConfig) { + if (sourceFunction.isEnableClassloading()) { + validateSourceConfig(sourceConfig, sourceFunction); + } else { + log.warn("Skipping annotation based validation of source config as classloading is disabled"); + } } - return new ExtractedSourceDetails(sourceClassName, typeArg.getName()); + return new ExtractedSourceDetails(sourceClassName, typeArg.asErasure().getTypeName()); } @SneakyThrows @@ -448,6 +461,9 @@ public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceCon validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig()); mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig()); } + if (newConfig.getProducerConfig() != null) { + mergedConfig.setProducerConfig(newConfig.getProducerConfig()); + } return mergedConfig; } @@ -521,15 +537,14 @@ public static void validateBatchSourceConfigUpdate(BatchSourceConfig existingCon } } - public static void validateSourceConfig(SourceConfig sourceConfig, NarClassLoader narClassLoader) { + public static void validateSourceConfig(SourceConfig sourceConfig, ValidatableFunctionPackage sourceFunction) { try { - ConnectorDefinition defn = ConnectorUtils.getConnectorDefinition(narClassLoader); - if (defn.getSourceConfigClass() != null) { - Class configClass = Class.forName(defn.getSourceConfigClass(), true, narClassLoader); + ConnectorDefinition defn = sourceFunction.getFunctionMetaData(ConnectorDefinition.class); + if (defn != null && defn.getSourceConfigClass() != null) { + Class configClass = + Class.forName(defn.getSourceConfigClass(), true, sourceFunction.getClassLoader()); validateSourceConfig(sourceConfig, configClass); } - } catch (IOException e) { - throw new IllegalArgumentException("Error validating source config", e); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not find source config class"); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java new file mode 100644 index 0000000000000..8d5aefb6f6785 --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.utils; + +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; + +/** + * This abstraction separates the function and connector definition from classloading, + * enabling validation without the need for classloading. It utilizes Byte Buddy for + * type and annotation resolution.
+ * + * The function or connector definition is directly extracted from the archive file, + * eliminating the need for classloader initialization. + * + * The getClassLoader method should only be invoked when classloading is enabled. + * Classloading is required in the LocalRunner and in the Functions worker when the + * worker is configured with the 'validateConnectorConfig' set to true. + */ +public interface ValidatableFunctionPackage { + /** + * Resolves the type description for the given class name within the function package. + */ + TypeDescription resolveType(String className); + /** + * Returns the Byte Buddy TypePool instance for the function package. + */ + TypePool getTypePool(); + /** + * Returns the function or connector definition metadata. + * Supports FunctionDefinition and ConnectorDefinition as the metadata type. + */ + T getFunctionMetaData(Class clazz); + /** + * Returns if classloading is enabled for the function package. + */ + boolean isEnableClassloading(); + /** + * Returns the classloader for the function package. The classloader is + * lazily initialized when classloading is enabled. + */ + ClassLoader getClassLoader(); +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java index 390671c5606af..8df6a3f261a6e 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java @@ -18,35 +18,40 @@ */ package org.apache.pulsar.functions.utils; -import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isEmpty; -import static org.apache.commons.lang3.StringUtils.isNotBlank; import java.util.Map; import lombok.extern.slf4j.Slf4j; -import net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.functions.CryptoConfig; import org.apache.pulsar.common.schema.SchemaType; -import org.apache.pulsar.common.util.ClassLoaderUtils; -import org.apache.pulsar.common.util.Reflections; import org.apache.pulsar.functions.api.SerDe; -import org.apache.pulsar.functions.proto.Function; -import org.apache.pulsar.io.core.Sink; -import org.apache.pulsar.io.core.Source; @Slf4j public class ValidatorUtils { private static final String DEFAULT_SERDE = "org.apache.pulsar.functions.api.utils.DefaultSerDe"; - public static void validateSchema(String schemaType, Class typeArg, ClassLoader clsLoader, + public static void validateSchema(String schemaType, TypeDefinition typeArg, TypePool typePool, boolean input) { if (isEmpty(schemaType) || getBuiltinSchemaType(schemaType) != null) { // If it's empty, we use the default schema and no need to validate // If it's built-in, no need to validate } else { - ClassLoaderUtils.implementsClass(schemaType, Schema.class, clsLoader); - validateSchemaType(schemaType, typeArg, clsLoader, input); + TypeDescription schemaClass = null; + try { + schemaClass = typePool.describe(schemaType).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { + throw new IllegalArgumentException( + String.format("The schema class %s does not exist", schemaType)); + } + if 
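// Illustrative sketch (not from this patch) of how a caller is meant to use the
// ValidatableFunctionPackage contract defined above: types are resolved through Byte Buddy
// metadata, and the classloader is only requested once classloading is known to be enabled.
// The interface methods are real; the helper itself and the Sink check are an assumed example.
static void checkSinkType(ValidatableFunctionPackage pkg, String sinkClassName) {
    TypeDescription sinkType = pkg.resolveType(sinkClassName);
    if (!sinkType.asErasure().isAssignableTo(org.apache.pulsar.io.core.Sink.class)) {
        throw new IllegalArgumentException(sinkClassName + " does not implement Sink");
    }
    ConnectorDefinition defn = pkg.getFunctionMetaData(ConnectorDefinition.class);
    if (defn != null && defn.getSinkConfigClass() != null && pkg.isEnableClassloading()) {
        // only now is it safe to touch the lazily initialized classloader
        ClassLoader cl = pkg.getClassLoader();
        // e.g. Class.forName(defn.getSinkConfigClass(), true, cl) for annotation-based validation
    }
}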
(!schemaClass.asErasure().isAssignableTo(Schema.class)) { + throw new IllegalArgumentException( + String.format("%s does not implement %s", schemaType, Schema.class.getName())); + } + validateSchemaType(schemaClass, typeArg, typePool, input); } } @@ -60,29 +65,32 @@ private static SchemaType getBuiltinSchemaType(String schemaTypeOrClassName) { } - public static void validateCryptoKeyReader(CryptoConfig conf, ClassLoader classLoader, boolean isProducer) { + public static void validateCryptoKeyReader(CryptoConfig conf, TypePool typePool, boolean isProducer) { if (isEmpty(conf.getCryptoKeyReaderClassName())) { return; } - Class cryptoClass; + String cryptoClassName = conf.getCryptoKeyReaderClassName(); + TypeDescription cryptoClass = null; try { - cryptoClass = ClassLoaderUtils.loadClass(conf.getCryptoKeyReaderClassName(), classLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { + cryptoClass = typePool.describe(cryptoClassName).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("The crypto key reader class %s does not exist", conf.getCryptoKeyReaderClassName())); + String.format("The crypto key reader class %s does not exist", cryptoClassName)); + } + if (!cryptoClass.asErasure().isAssignableTo(CryptoKeyReader.class)) { + throw new IllegalArgumentException( + String.format("%s does not implement %s", cryptoClassName, CryptoKeyReader.class.getName())); } - ClassLoaderUtils.implementsClass(conf.getCryptoKeyReaderClassName(), CryptoKeyReader.class, classLoader); - try { - cryptoClass.getConstructor(Map.class); - } catch (NoSuchMethodException ex) { + boolean hasConstructor = cryptoClass.getDeclaredMethods().stream() + .anyMatch(method -> method.isConstructor() && method.getParameters().size() == 1 + && method.getParameters().get(0).getType().asErasure().represents(Map.class)); + + if (!hasConstructor) { throw new IllegalArgumentException( String.format("The crypto key reader class %s does not implement the desired constructor.", conf.getCryptoKeyReaderClassName())); - - } catch (SecurityException e) { - throw new IllegalArgumentException("Failed to access crypto key reader class", e); } if (isProducer && (conf.getEncryptionKeys() == null || conf.getEncryptionKeys().length == 0)) { @@ -90,7 +98,7 @@ public static void validateCryptoKeyReader(CryptoConfig conf, ClassLoader classL } } - public static void validateSerde(String inputSerializer, Class typeArg, ClassLoader clsLoader, + public static void validateSerde(String inputSerializer, TypeDefinition typeArg, TypePool typePool, boolean deser) { if (isEmpty(inputSerializer)) { return; @@ -98,154 +106,53 @@ public static void validateSerde(String inputSerializer, Class typeArg, Class if (inputSerializer.equals(DEFAULT_SERDE)) { return; } + TypeDescription serdeClass; try { - Class serdeClass = ClassLoaderUtils.loadClass(inputSerializer, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { + serdeClass = typePool.describe(inputSerializer).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( String.format("The input serialization/deserialization class %s does not exist", inputSerializer)); } - ClassLoaderUtils.implementsClass(inputSerializer, SerDe.class, clsLoader); - - SerDe serDe = (SerDe) Reflections.createInstance(inputSerializer, clsLoader); - if (serDe == null) { - throw new IllegalArgumentException(String.format("The SerDe class %s does not exist", - inputSerializer)); - } - Class[] 
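// Illustrative sketch (not from this patch): the same constructor check that
// validateCryptoKeyReader performs above, done purely on Byte Buddy type metadata so the
// crypto key reader class is never loaded or instantiated during validation.
// "com.example.MyCryptoKeyReader" is an assumed class name; TypePool.Default.of(...) mirrors
// the usage elsewhere in this change.
TypePool pool = TypePool.Default.of(Thread.currentThread().getContextClassLoader());
TypeDescription reader = pool.describe("com.example.MyCryptoKeyReader").resolve();
boolean hasMapConstructor = reader.getDeclaredMethods().stream()
        .anyMatch(m -> m.isConstructor()
                && m.getParameters().size() == 1
                && m.getParameters().get(0).getType().asErasure().represents(java.util.Map.class));
// hasMapConstructor == false triggers the "does not implement the desired constructor" error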
serDeTypes = TypeResolver.resolveRawArguments(SerDe.class, serDe.getClass()); - - // type inheritance information seems to be lost in generic type - // load the actual type class for verification - Class fnInputClass; - Class serdeInputClass; - try { - fnInputClass = Class.forName(typeArg.getName(), true, clsLoader); - serdeInputClass = Class.forName(serDeTypes[0].getName(), true, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException("Failed to load type class", e); - } + TypeDescription.Generic serDeTypeArg = serdeClass.getInterfaces().stream() + .filter(i -> i.asErasure().isAssignableTo(SerDe.class)) + .findFirst() + .map(i -> i.getTypeArguments().get(0)) + .orElseThrow(() -> new IllegalArgumentException( + String.format("%s does not implement %s", inputSerializer, SerDe.class.getName()))); if (deser) { - if (!fnInputClass.isAssignableFrom(serdeInputClass)) { - throw new IllegalArgumentException("Serializer type mismatch " + typeArg + " vs " + serDeTypes[0]); + if (!serDeTypeArg.asErasure().isAssignableTo(typeArg.asErasure())) { + throw new IllegalArgumentException("Serializer type mismatch " + typeArg.getActualName() + " vs " + + serDeTypeArg.getActualName()); } } else { - if (!serdeInputClass.isAssignableFrom(fnInputClass)) { - throw new IllegalArgumentException("Serializer type mismatch " + typeArg + " vs " + serDeTypes[0]); + if (!serDeTypeArg.asErasure().isAssignableFrom(typeArg.asErasure())) { + throw new IllegalArgumentException("Serializer type mismatch " + typeArg.getActualName() + " vs " + + serDeTypeArg.getActualName()); } } } - private static void validateSchemaType(String schemaClassName, Class typeArg, ClassLoader clsLoader, + private static void validateSchemaType(TypeDefinition schema, TypeDefinition typeArg, TypePool typePool, boolean input) { - Schema schema = (Schema) Reflections.createInstance(schemaClassName, clsLoader); - if (schema == null) { - throw new IllegalArgumentException(String.format("The Schema class %s does not exist", - schemaClassName)); - } - Class[] schemaTypes = TypeResolver.resolveRawArguments(Schema.class, schema.getClass()); - // type inheritance information seems to be lost in generic type - // load the actual type class for verification - Class fnInputClass; - Class schemaInputClass; - try { - fnInputClass = Class.forName(typeArg.getName(), true, clsLoader); - schemaInputClass = Class.forName(schemaTypes[0].getName(), true, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException("Failed to load type class", e); - } + TypeDescription.Generic schemaTypeArg = schema.getInterfaces().stream() + .filter(i -> i.asErasure().isAssignableTo(Schema.class)) + .findFirst() + .map(i -> i.getTypeArguments().get(0)) + .orElse(null); if (input) { - if (!fnInputClass.isAssignableFrom(schemaInputClass)) { + if (!schemaTypeArg.asErasure().isAssignableTo(typeArg.asErasure())) { throw new IllegalArgumentException( - "Schema type mismatch " + typeArg + " vs " + schemaTypes[0]); + "Schema type mismatch " + typeArg.getActualName() + " vs " + schemaTypeArg.getActualName()); } } else { - if (!schemaInputClass.isAssignableFrom(fnInputClass)) { + if (!schemaTypeArg.asErasure().isAssignableFrom(typeArg.asErasure())) { throw new IllegalArgumentException( - "Schema type mismatch " + typeArg + " vs " + schemaTypes[0]); - } - } - } - - - public static void validateFunctionClassTypes(ClassLoader classLoader, - Function.FunctionDetails.Builder functionDetailsBuilder) { 
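// Illustrative sketch (not from this patch): resolving the declared type parameter of a SerDe
// implementation from Byte Buddy metadata, mirroring the stream pipeline used in validateSerde
// above. "com.example.StringSerDe implements SerDe<String>" is an assumed example class; like the
// patch, this inspects only the directly declared interfaces.
TypePool pool = TypePool.Default.of(Thread.currentThread().getContextClassLoader());
TypeDescription serde = pool.describe("com.example.StringSerDe").resolve();
TypeDescription.Generic valueType = serde.getInterfaces().stream()
        .filter(i -> i.asErasure().isAssignableTo(SerDe.class))
        .findFirst()
        .map(i -> i.getTypeArguments().get(0))      // String for SerDe<String>
        .orElseThrow(() -> new IllegalArgumentException("not a SerDe implementation"));
// valueType.asErasure() is then compared against the function's type argument with
// isAssignableTo/isAssignableFrom, replacing the old TypeResolver + Class.forName round trip.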
- - // validate only if classLoader is provided - if (classLoader == null) { - return; - } - - if (isBlank(functionDetailsBuilder.getClassName())) { - throw new IllegalArgumentException("Function class-name can't be empty"); - } - - // validate function class-type - Class functionClass; - try { - functionClass = classLoader.loadClass(functionDetailsBuilder.getClassName()); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionDetailsBuilder.getClassName()), e); - } - Class[] typeArgs = FunctionCommon.getFunctionTypes(functionClass, false); - - if (!(org.apache.pulsar.functions.api.Function.class.isAssignableFrom(functionClass)) - && !(java.util.function.Function.class.isAssignableFrom(functionClass))) { - throw new RuntimeException("User class must either be Function or java.util.Function"); - } - - if (functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource() != null - && isNotBlank(functionDetailsBuilder.getSource().getClassName())) { - try { - String sourceClassName = functionDetailsBuilder.getSource().getClassName(); - String argClassName = FunctionCommon.getTypeArg(sourceClassName, Source.class, classLoader).getName(); - functionDetailsBuilder - .setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(argClassName)); - - // if sink-class not present then set same arg as source - if (!functionDetailsBuilder.hasSink() || isBlank(functionDetailsBuilder.getSink().getClassName())) { - functionDetailsBuilder - .setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(argClassName)); - } - - } catch (IllegalArgumentException ie) { - throw ie; - } catch (Exception e) { - log.error("Failed to validate source class", e); - throw new IllegalArgumentException("Failed to validate source class-name", e); - } - } else if (isBlank(functionDetailsBuilder.getSourceBuilder().getTypeClassName())) { - // if function-src-class is not present then set function-src type-class according to function class - functionDetailsBuilder - .setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(typeArgs[0].getName())); - } - - if (functionDetailsBuilder.hasSink() && functionDetailsBuilder.getSink() != null - && isNotBlank(functionDetailsBuilder.getSink().getClassName())) { - try { - String sinkClassName = functionDetailsBuilder.getSink().getClassName(); - String argClassName = FunctionCommon.getTypeArg(sinkClassName, Sink.class, classLoader).getName(); - functionDetailsBuilder.setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(argClassName)); - - // if source-class not present then set same arg as sink - if (!functionDetailsBuilder.hasSource() || isBlank(functionDetailsBuilder.getSource().getClassName())) { - functionDetailsBuilder - .setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(argClassName)); - } - - } catch (IllegalArgumentException ie) { - throw ie; - } catch (Exception e) { - log.error("Failed to validate sink class", e); - throw new IllegalArgumentException("Failed to validate sink class-name", e); + "Schema type mismatch " + typeArg.getActualName() + " vs " + schemaTypeArg.getActualName()); } - } else if (isBlank(functionDetailsBuilder.getSinkBuilder().getTypeClassName())) { - // if function-sink-class is not present then set function-sink type-class according to function class - functionDetailsBuilder - .setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(typeArgs[1].getName())); } } } diff --git 
a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java index 028b57d69c86b..cfb213f34ed72 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java @@ -19,14 +19,50 @@ package org.apache.pulsar.functions.utils.functions; import java.nio.file.Path; -import lombok.Builder; -import lombok.Data; import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; -@Builder -@Data -public class FunctionArchive { - private Path archivePath; - private ClassLoader classLoader; - private FunctionDefinition functionDefinition; +public class FunctionArchive implements AutoCloseable { + private final Path archivePath; + private final FunctionDefinition functionDefinition; + private final String narExtractionDirectory; + private final boolean enableClassloading; + private ValidatableFunctionPackage functionPackage; + private boolean closed; + + public FunctionArchive(Path archivePath, FunctionDefinition functionDefinition, String narExtractionDirectory, + boolean enableClassloading) { + this.archivePath = archivePath; + this.functionDefinition = functionDefinition; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + } + + public Path getArchivePath() { + return archivePath; + } + + public synchronized ValidatableFunctionPackage getFunctionPackage() { + if (closed) { + throw new IllegalStateException("FunctionArchive is already closed"); + } + if (functionPackage == null) { + functionPackage = new FunctionFilePackage(archivePath.toFile(), narExtractionDirectory, enableClassloading, + FunctionDefinition.class); + } + return functionPackage; + } + + public FunctionDefinition getFunctionDefinition() { + return functionDefinition; + } + + @Override + public synchronized void close() throws Exception { + closed = true; + if (functionPackage instanceof AutoCloseable) { + ((AutoCloseable) functionPackage).close(); + } + } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java index 941df573e495e..31a5540e0bfaf 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.apache.pulsar.functions.utils.functions; import java.io.File; @@ -30,10 +31,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; import org.apache.pulsar.common.util.ObjectMapperFactory; -import org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.utils.Exceptions; +import org.zeroturnaround.zip.ZipUtil; @UtilityClass @@ -45,43 +44,40 @@ public class FunctionUtils { /** * Extract the Pulsar Function class from a function or archive. 
*/ - public static String getFunctionClass(ClassLoader classLoader) throws IOException { - NarClassLoader ncl = (NarClassLoader) classLoader; - String configStr = ncl.getServiceDefinition(PULSAR_IO_SERVICE_NAME); - - FunctionDefinition conf = ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, - FunctionDefinition.class); - if (StringUtils.isEmpty(conf.getFunctionClass())) { - throw new IOException( - String.format("The '%s' functionctor does not provide a function implementation", conf.getName())); - } + public static String getFunctionClass(File narFile) throws IOException { + return getFunctionDefinition(narFile).getFunctionClass(); + } - try { - // Try to load source class and check it implements Function interface - Class functionClass = ncl.loadClass(conf.getFunctionClass()); - if (!(Function.class.isAssignableFrom(functionClass))) { - throw new IOException( - "Class " + conf.getFunctionClass() + " does not implement interface " + Function.class - .getName()); - } - } catch (Throwable t) { - Exceptions.rethrowIOException(t); + public static FunctionDefinition getFunctionDefinition(File narFile) throws IOException { + return getPulsarIOServiceConfig(narFile, FunctionDefinition.class); + } + + public static T getPulsarIOServiceConfig(File narFile, Class valueType) throws IOException { + String filename = "META-INF/services/" + PULSAR_IO_SERVICE_NAME; + byte[] configEntry = ZipUtil.unpackEntry(narFile, filename); + if (configEntry != null) { + return ObjectMapperFactory.getYamlMapper().reader().readValue(configEntry, valueType); + } else { + return null; } + } - return conf.getFunctionClass(); + public static String getFunctionClass(NarClassLoader narClassLoader) throws IOException { + return getFunctionDefinition(narClassLoader).getFunctionClass(); } public static FunctionDefinition getFunctionDefinition(NarClassLoader narClassLoader) throws IOException { - String configStr = narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME); - return ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, FunctionDefinition.class); + return getPulsarIOServiceConfig(narClassLoader, FunctionDefinition.class); } - public static TreeMap searchForFunctions(String functionsDirectory) throws IOException { - return searchForFunctions(functionsDirectory, false); + public static T getPulsarIOServiceConfig(NarClassLoader narClassLoader, Class valueType) throws IOException { + return ObjectMapperFactory.getYamlMapper().reader() + .readValue(narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME), valueType); } public static TreeMap searchForFunctions(String functionsDirectory, - boolean alwaysPopulatePath) throws IOException { + String narExtractionDirectory, + boolean enableClassloading) throws IOException { Path path = Paths.get(functionsDirectory).toAbsolutePath(); log.info("Searching for functions in {}", path); @@ -95,22 +91,12 @@ public static TreeMap searchForFunctions(String functio try (DirectoryStream stream = Files.newDirectoryStream(path, "*.nar")) { for (Path archive : stream) { try { - - NarClassLoader ncl = NarClassLoaderBuilder.builder() - .narFile(new File(archive.toString())) - .build(); - - FunctionArchive.FunctionArchiveBuilder functionArchiveBuilder = FunctionArchive.builder(); - FunctionDefinition cntDef = FunctionUtils.getFunctionDefinition(ncl); + FunctionDefinition cntDef = FunctionUtils.getFunctionDefinition(archive.toFile()); log.info("Found function {} from {}", cntDef, archive); - - functionArchiveBuilder.archivePath(archive); - - 
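// Illustrative sketch (not from this patch): reading the service definition straight out of the
// .nar archive (a zip file) with zt-zip, as getPulsarIOServiceConfig above does, so no
// NarClassLoader has to be built just to list available functions or connectors. The entry path
// matches the error message used elsewhere in this change; the narFile location is up to the caller.
static ConnectorDefinition readDefinition(File narFile) throws IOException {
    byte[] entry = ZipUtil.unpackEntry(narFile, "META-INF/services/pulsar-io.yaml");
    return entry == null
            ? null
            : ObjectMapperFactory.getYamlMapper().reader().readValue(entry, ConnectorDefinition.class);
}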
functionArchiveBuilder.classLoader(ncl); - functionArchiveBuilder.functionDefinition(cntDef); - - if (alwaysPopulatePath || !StringUtils.isEmpty(cntDef.getFunctionClass())) { - functions.put(cntDef.getName(), functionArchiveBuilder.build()); + if (!StringUtils.isEmpty(cntDef.getFunctionClass())) { + FunctionArchive functionArchive = + new FunctionArchive(archive, cntDef, narExtractionDirectory, enableClassloading); + functions.put(cntDef.getName(), functionArchive); } } catch (Throwable t) { log.warn("Failed to load function from {}", archive, t); diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java index f1a03f4424ec6..5fcc22747c516 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java @@ -20,17 +20,79 @@ import java.nio.file.Path; import java.util.List; -import lombok.Builder; -import lombok.Data; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.io.ConfigFieldDefinition; import org.apache.pulsar.common.io.ConnectorDefinition; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; -@Builder -@Data -public class Connector { - private Path archivePath; +public class Connector implements AutoCloseable { + private final Path archivePath; + private final String narExtractionDirectory; + private final boolean enableClassloading; + private ValidatableFunctionPackage connectorFunctionPackage; private List sourceConfigFieldDefinitions; private List sinkConfigFieldDefinitions; - private ClassLoader classLoader; private ConnectorDefinition connectorDefinition; + private boolean closed; + + public Connector(Path archivePath, ConnectorDefinition connectorDefinition, String narExtractionDirectory, + boolean enableClassloading) { + this.archivePath = archivePath; + this.connectorDefinition = connectorDefinition; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + } + + public Path getArchivePath() { + return archivePath; + } + + public synchronized ValidatableFunctionPackage getConnectorFunctionPackage() { + checkState(); + if (connectorFunctionPackage == null) { + connectorFunctionPackage = + new FunctionFilePackage(archivePath.toFile(), narExtractionDirectory, enableClassloading, + ConnectorDefinition.class); + } + return connectorFunctionPackage; + } + + private void checkState() { + if (closed) { + throw new IllegalStateException("Connector is already closed"); + } + } + + public synchronized List getSourceConfigFieldDefinitions() { + checkState(); + if (sourceConfigFieldDefinitions == null && !StringUtils.isEmpty(connectorDefinition.getSourceClass()) + && !StringUtils.isEmpty(connectorDefinition.getSourceConfigClass())) { + sourceConfigFieldDefinitions = ConnectorUtils.getConnectorConfigDefinition(getConnectorFunctionPackage(), + connectorDefinition.getSourceConfigClass()); + } + return sourceConfigFieldDefinitions; + } + + public synchronized List getSinkConfigFieldDefinitions() { + checkState(); + if (sinkConfigFieldDefinitions == null && !StringUtils.isEmpty(connectorDefinition.getSinkClass()) + && !StringUtils.isEmpty(connectorDefinition.getSinkConfigClass())) { + sinkConfigFieldDefinitions = 
ConnectorUtils.getConnectorConfigDefinition(getConnectorFunctionPackage(), + connectorDefinition.getSinkConfigClass()); + } + return sinkConfigFieldDefinitions; + } + + public ConnectorDefinition getConnectorDefinition() { + return connectorDefinition; + } + + @Override + public synchronized void close() throws Exception { + closed = true; + if (connectorFunctionPackage instanceof AutoCloseable) { + ((AutoCloseable) connectorFunctionPackage).close(); + } + } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java index a814bf35548f3..df1310965f392 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java @@ -18,38 +18,31 @@ */ package org.apache.pulsar.functions.utils.io; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.File; import java.io.IOException; -import java.lang.annotation.Annotation; -import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.AbstractMap; -import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.stream.Collectors; import lombok.experimental.UtilityClass; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.annotation.AnnotationDescription; +import net.bytebuddy.description.annotation.AnnotationValue; +import net.bytebuddy.description.field.FieldDescription; +import net.bytebuddy.description.method.MethodDescription; +import net.bytebuddy.description.type.TypeDefinition; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.io.ConfigFieldDefinition; import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.ObjectMapperFactory; -import org.apache.pulsar.common.util.Reflections; import org.apache.pulsar.functions.utils.Exceptions; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Sink; import org.apache.pulsar.io.core.Source; @@ -76,7 +69,7 @@ public static String getIOSourceClass(NarClassLoader narClassLoader) throws IOEx Class sourceClass = narClassLoader.loadClass(conf.getSourceClass()); if (!(Source.class.isAssignableFrom(sourceClass) || BatchSource.class.isAssignableFrom(sourceClass))) { throw new IOException(String.format("Class %s does not implement interface %s or %s", - conf.getSourceClass(), Source.class.getName(), BatchSource.class.getName())); + conf.getSourceClass(), Source.class.getName(), BatchSource.class.getName())); } } catch (Throwable t) { Exceptions.rethrowIOException(t); @@ -109,32 +102,36 @@ public static String getIOSinkClass(NarClassLoader narClassLoader) throws IOExce return conf.getSinkClass(); } - public static ConnectorDefinition 
getConnectorDefinition(NarClassLoader narClassLoader) throws IOException { - String configStr = narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME); + public static ConnectorDefinition getConnectorDefinition(File narFile) throws IOException { + return FunctionUtils.getPulsarIOServiceConfig(narFile, ConnectorDefinition.class); + } - return ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, ConnectorDefinition.class); + public static ConnectorDefinition getConnectorDefinition(NarClassLoader narClassLoader) throws IOException { + return FunctionUtils.getPulsarIOServiceConfig(narClassLoader, ConnectorDefinition.class); } - public static List getConnectorConfigDefinition(ClassLoader classLoader, - String configClassName) throws Exception { + public static List getConnectorConfigDefinition( + ValidatableFunctionPackage connectorFunctionPackage, + String configClassName) { List retval = new LinkedList<>(); - Class configClass = classLoader.loadClass(configClassName); - for (Field field : Reflections.getAllFields(configClass)) { - if (java.lang.reflect.Modifier.isStatic(field.getModifiers())) { - // We dont want static fields + TypeDefinition configClass = connectorFunctionPackage.resolveType(configClassName); + + for (FieldDescription field : getAllFields(configClass)) { + if (field.isStatic()) { + // We don't want static fields continue; } - field.setAccessible(true); ConfigFieldDefinition configFieldDefinition = new ConfigFieldDefinition(); configFieldDefinition.setFieldName(field.getName()); - configFieldDefinition.setTypeName(field.getType().getName()); + configFieldDefinition.setTypeName(field.getType().getActualName()); Map attributes = new HashMap<>(); - for (Annotation annotation : field.getAnnotations()) { - if (annotation.annotationType().equals(FieldDoc.class)) { - FieldDoc fieldDoc = (FieldDoc) annotation; - for (Method method : FieldDoc.class.getDeclaredMethods()) { - Object value = method.invoke(fieldDoc); - attributes.put(method.getName(), value == null ? "" : value.toString()); + for (AnnotationDescription annotation : field.getDeclaredAnnotations()) { + if (annotation.getAnnotationType().represents(FieldDoc.class)) { + for (MethodDescription.InDefinedShape method : annotation.getAnnotationType() + .getDeclaredMethods()) { + AnnotationValue value = annotation.getValue(method.getName()); + attributes.put(method.getName(), + value == null || value.resolve() == null ? 
"" : value.resolve().toString()); } } } @@ -145,86 +142,42 @@ public static List getConnectorConfigDefinition(ClassLoad return retval; } + private static List getAllFields(TypeDefinition type) { + List fields = new LinkedList<>(); + fields.addAll(type.getDeclaredFields()); + + if (type.getSuperClass() != null) { + fields.addAll(getAllFields(type.getSuperClass())); + } + + return fields; + } + public static TreeMap searchForConnectors(String connectorsDirectory, - String narExtractionDirectory) throws IOException { + String narExtractionDirectory, + boolean enableClassloading) throws IOException { Path path = Paths.get(connectorsDirectory).toAbsolutePath(); log.info("Searching for connectors in {}", path); + TreeMap connectors = new TreeMap<>(); + if (!path.toFile().exists()) { log.warn("Connectors archive directory not found"); - return new TreeMap<>(); + return connectors; } - List archives = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(path, "*.nar")) { for (Path archive : stream) { - archives.add(archive); - } - } - if (archives.isEmpty()) { - return new TreeMap<>(); - } - - ExecutorService oneTimeExecutor = null; - try { - int nThreads = Math.min(Runtime.getRuntime().availableProcessors(), archives.size()); - log.info("Loading {} connector definitions with a thread pool of size {}", archives.size(), nThreads); - oneTimeExecutor = Executors.newFixedThreadPool(nThreads, - new ThreadFactoryBuilder().setNameFormat("connector-extraction-executor-%d").build()); - List>> futures = new ArrayList<>(); - for (Path archive : archives) { - CompletableFuture> future = CompletableFuture.supplyAsync(() -> - getConnectorDefinitionEntry(archive, narExtractionDirectory), oneTimeExecutor); - futures.add(future); - } - - FutureUtil.waitForAll(futures).join(); - return futures.stream() - .map(CompletableFuture::join) - .filter(entry -> entry != null) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a, b) -> a, TreeMap::new)); - } finally { - if (oneTimeExecutor != null) { - oneTimeExecutor.shutdown(); - } - } - } - - private static Map.Entry getConnectorDefinitionEntry(Path archive, - String narExtractionDirectory) { - try { - - NarClassLoader ncl = NarClassLoaderBuilder.builder() - .narFile(new File(archive.toString())) - .extractionDirectory(narExtractionDirectory) - .build(); - - Connector.ConnectorBuilder connectorBuilder = Connector.builder(); - ConnectorDefinition cntDef = ConnectorUtils.getConnectorDefinition(ncl); - log.info("Found connector {} from {}", cntDef, archive); - - connectorBuilder.archivePath(archive); - if (!StringUtils.isEmpty(cntDef.getSourceClass())) { - if (!StringUtils.isEmpty(cntDef.getSourceConfigClass())) { - connectorBuilder.sourceConfigFieldDefinitions( - ConnectorUtils.getConnectorConfigDefinition(ncl, - cntDef.getSourceConfigClass())); + try { + ConnectorDefinition cntDef = ConnectorUtils.getConnectorDefinition(archive.toFile()); + log.info("Found connector {} from {}", cntDef, archive); + Connector connector = new Connector(archive, cntDef, narExtractionDirectory, enableClassloading); + connectors.put(cntDef.getName(), connector); + } catch (Throwable t) { + log.warn("Failed to load connector from {}", archive, t); } } - - if (!StringUtils.isEmpty(cntDef.getSinkClass())) { - if (!StringUtils.isEmpty(cntDef.getSinkConfigClass())) { - connectorBuilder.sinkConfigFieldDefinitions( - ConnectorUtils.getConnectorConfigDefinition(ncl, cntDef.getSinkConfigClass())); - } - } - - connectorBuilder.classLoader(ncl); - 
connectorBuilder.connectorDefinition(cntDef); - return new AbstractMap.SimpleEntry(cntDef.getName(), connectorBuilder.build()); - } catch (Throwable t) { - log.warn("Failed to load connector from {}", archive, t); - return null; } + return connectors; } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java index 113824fc7c1a1..90fdd4da777d3 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java @@ -18,21 +18,29 @@ */ package org.apache.pulsar.functions.utils; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.configureFor; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; +import com.github.tomakehurst.wiremock.WireMockServer; import java.io.File; import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import lombok.Cleanup; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; import org.apache.pulsar.client.impl.MessageIdImpl; -import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.functions.api.Context; import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.WindowContext; import org.apache.pulsar.functions.api.WindowFunction; import org.assertj.core.util.Files; -import org.testng.Assert; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -40,41 +48,6 @@ * Unit test of {@link Exceptions}. */ public class FunctionCommonTest { - - @Test - public void testValidateLocalFileUrl() throws Exception { - String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - try { - // eg: fileLocation : /dir/fileName.jar (invalid) - FunctionCommon.extractClassLoader(fileLocation); - Assert.fail("should fail with invalid url: without protocol"); - } catch (IllegalArgumentException ie) { - // Ok.. expected exception - } - String fileLocationWithProtocol = "file://" + fileLocation; - // eg: fileLocation : file:///dir/fileName.jar (valid) - FunctionCommon.extractClassLoader(fileLocationWithProtocol); - // eg: fileLocation : file:/dir/fileName.jar (valid) - fileLocationWithProtocol = "file:" + fileLocation; - FunctionCommon.extractClassLoader(fileLocationWithProtocol); - } - - @Test - public void testValidateHttpFileUrl() throws Exception { - - String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - FunctionCommon.extractClassLoader(jarHttpUrl); - - jarHttpUrl = "http://_invalidurl_.com"; - try { - // eg: fileLocation : /dir/fileName.jar (invalid) - FunctionCommon.extractClassLoader(jarHttpUrl); - Assert.fail("should fail with invalid url: without protocol"); - } catch (Exception ie) { - // Ok.. 
expected exception - } - } - @Test public void testDownloadFile() throws Exception { final String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; @@ -87,7 +60,14 @@ public void testDownloadFile() throws Exception { @Test public void testDownloadFileWithBasicAuth() throws Exception { - final String jarHttpUrl = "https://foo:bar@httpbin.org/basic-auth/foo/bar"; + @Cleanup("stop") + WireMockServer server = new WireMockServer(0); + server.start(); + configureFor(server.port()); + stubFor(get(urlPathEqualTo("/")) + .withBasicAuth("foo", "bar") + .willReturn(aResponse().withBody("Hello world!").withStatus(200))); + final String jarHttpUrl = "http://foo:bar@localhost:" + server.port() + "/"; final File file = Files.newTemporaryFile(); file.deleteOnExit(); assertThat(file.length()).isZero(); @@ -136,6 +116,14 @@ public Record process(String input, Context context) throws Exception { } }, false }, + { + new Function>() { + @Override + public CompletableFuture process(String input, Context context) throws Exception { + return null; + } + }, false + }, { new java.util.function.Function() { @Override @@ -152,6 +140,14 @@ public Record apply(String s) { } }, false }, + { + new java.util.function.Function>() { + @Override + public CompletableFuture apply(String s) { + return null; + } + }, false + }, { new WindowFunction() { @Override @@ -168,6 +164,14 @@ public Record process(Collection> input, WindowContext c } }, true }, + { + new WindowFunction>() { + @Override + public CompletableFuture process(Collection> input, WindowContext context) throws Exception { + return null; + } + }, true + }, { new java.util.function.Function, Integer>() { @Override @@ -183,15 +187,26 @@ public Record apply(Collection strings) { return null; } }, true + }, + { + new java.util.function.Function, CompletableFuture>() { + @Override + public CompletableFuture apply(Collection strings) { + return null; + } + }, true } }; } @Test(dataProvider = "function") public void testGetFunctionTypes(Object function, boolean isWindowConfigPresent) { - Class[] types = FunctionCommon.getFunctionTypes(function.getClass(), isWindowConfigPresent); + TypePool typePool = TypePool.Default.of(function.getClass().getClassLoader()); + TypeDefinition[] types = + FunctionCommon.getFunctionTypes(typePool.describe(function.getClass().getName()).resolve(), + isWindowConfigPresent); assertEquals(types.length, 2); - assertEquals(types[0], String.class); - assertEquals(types[1], Integer.class); + assertEquals(types[0].asErasure().getTypeName(), String.class.getName()); + assertEquals(types[1].asErasure().getTypeName(), Integer.class.getName()); } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java index 8b4470d8c76c7..954eef44a7366 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java @@ -18,13 +18,24 @@ */ package org.apache.pulsar.functions.utils; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.Runtime.PYTHON; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNull; 
+import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; import com.google.gson.Gson; - -import org.apache.pulsar.client.api.CompressionType; -import org.apache.pulsar.client.api.SubscriptionInitialPosition; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.util.JsonFormat; +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.client.api.CompressionType; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.impl.schema.JSONSchema; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; @@ -32,28 +43,29 @@ import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.functions.WindowConfig; import org.apache.pulsar.common.util.Reflections; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.functions.api.WindowContext; +import org.apache.pulsar.functions.api.WindowFunction; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; import org.testng.annotations.Test; -import java.lang.reflect.Field; -import java.util.HashMap; -import java.util.Map; - -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.Runtime.PYTHON; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNull; -import static org.testng.Assert.assertThrows; -import static org.testng.Assert.assertTrue; - /** * Unit test of {@link Reflections}. 
*/ @Slf4j public class FunctionConfigUtilsTest { + public static class WordCountWindowFunction implements WindowFunction { + @Override + public Void process(Collection> inputs, WindowContext context) throws Exception { + for (Record input : inputs) { + Arrays.asList(input.getValue().split("\\.")).forEach(word -> context.incrCounter(word, 1)); + } + return null; + } + } + @Test public void testAutoAckConvertFailed() { @@ -63,7 +75,7 @@ public void testAutoAckConvertFailed() { functionConfig.setProcessingGuarantees(FunctionConfig.ProcessingGuarantees.ATMOST_ONCE); assertThrows(IllegalArgumentException.class, () -> { - FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + FunctionConfigUtils.convert(functionConfig); }); } @@ -99,7 +111,7 @@ public void testConvertBackFidelity() { producerConfig.setBatchBuilder("DEFAULT"); producerConfig.setCompressionType(CompressionType.ZLIB); functionConfig.setProducerConfig(producerConfig); - Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); // add default resources @@ -119,7 +131,7 @@ public void testConvertWindow() { functionConfig.setNamespace("test-namespace"); functionConfig.setName("test-function"); functionConfig.setParallelism(1); - functionConfig.setClassName(IdentityFunction.class.getName()); + functionConfig.setClassName(WordCountWindowFunction.class.getName()); Map inputSpecs = new HashMap<>(); inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(true).serdeClassName("test-serde").build()); functionConfig.setInputSpecs(inputSpecs); @@ -141,7 +153,7 @@ public void testConvertWindow() { producerConfig.setBatchBuilder("KEY_BASED"); producerConfig.setCompressionType(CompressionType.SNAPPY); functionConfig.setProducerConfig(producerConfig); - Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); // WindowsFunction guarantees convert to FunctionGuarantees. 
@@ -158,6 +170,18 @@ public void testConvertWindow() { ); } + @Test + public void testConvertBatchBuilder() { + FunctionConfig functionConfig = createFunctionConfig(); + functionConfig.setBatchBuilder("KEY_BASED"); + + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); + assertEquals(functionDetails.getSink().getProducerSpec().getBatchBuilder(), "KEY_BASED"); + + FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); + assertEquals(convertedConfig.getProducerConfig().getBatchBuilder(), "KEY_BASED"); + } + @Test public void testMergeEqual() { FunctionConfig functionConfig = createFunctionConfig(); @@ -429,6 +453,30 @@ public void testMergeDifferentWindowConfig() { ); } + @Test + public void testMergeDifferentProducerConfig() { + FunctionConfig functionConfig = createFunctionConfig(); + + ProducerConfig producerConfig = new ProducerConfig(); + producerConfig.setMaxPendingMessages(100); + producerConfig.setMaxPendingMessagesAcrossPartitions(1000); + producerConfig.setUseThreadLocalProducers(true); + producerConfig.setBatchBuilder("DEFAULT"); + producerConfig.setCompressionType(CompressionType.ZLIB); + FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("producerConfig", producerConfig); + + FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig); + assertEquals( + mergedConfig.getProducerConfig(), + producerConfig + ); + mergedConfig.setProducerConfig(functionConfig.getProducerConfig()); + assertEquals( + new Gson().toJson(functionConfig), + new Gson().toJson(mergedConfig) + ); + } + @Test public void testMergeDifferentTimeout() { FunctionConfig functionConfig = createFunctionConfig(); @@ -483,7 +531,6 @@ private FunctionConfig createFunctionConfig() { functionConfig.setUserConfig(new HashMap<>()); functionConfig.setAutoAck(true); functionConfig.setTimeoutMs(2000L); - functionConfig.setWindowConfig(new WindowConfig().setWindowLengthCount(10)); functionConfig.setCleanupSubscription(true); functionConfig.setRuntimeFlags("-Dfoo=bar"); return functionConfig; @@ -517,7 +564,7 @@ public void testDisableForwardSourceMessageProperty() throws InvalidProtocolBuff config.setForwardSourceMessageProperty(true); FunctionConfigUtils.inferMissingArguments(config, false); assertNull(config.getForwardSourceMessageProperty()); - FunctionDetails details = FunctionConfigUtils.convert(config, FunctionConfigUtilsTest.class.getClassLoader()); + FunctionDetails details = FunctionConfigUtils.convert(config); assertFalse(details.getSink().getForwardSourceMessageProperty()); String detailsJson = "'" + JsonFormat.printer().omittingInsignificantWhitespace().print(details) + "'"; log.info("Function details : {}", detailsJson); @@ -604,7 +651,7 @@ public void testMergeDifferentOutputSchemaTypes() { @Test public void testPoolMessages() { FunctionConfig functionConfig = createFunctionConfig(); - Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); assertFalse(functionDetails.getSource().getInputSpecsMap().get("test-input").getPoolMessages()); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); assertFalse(convertedConfig.getInputSpecs().get("test-input").isPoolMessages()); @@ -614,7 +661,7 @@ public void testPoolMessages() { .poolMessages(true).build()); functionConfig.setInputSpecs(inputSpecs); - functionDetails = 
FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + functionDetails = FunctionConfigUtils.convert(functionConfig); assertTrue(functionDetails.getSource().getInputSpecsMap().get("test-input").getPoolMessages()); convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java index 62d6f68d31b2c..14cd77f60ff95 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java @@ -18,37 +18,38 @@ */ package org.apache.pulsar.functions.utils; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATMOST_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; import com.google.common.collect.Lists; import com.google.gson.Gson; +import java.io.IOException; +import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import lombok.Data; import lombok.NoArgsConstructor; import lombok.experimental.Accessors; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.Resources; +import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.config.validation.ConfigValidationAnnotations; -import org.apache.pulsar.functions.api.utils.IdentityFunction; +import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.io.core.Sink; +import org.apache.pulsar.io.core.SinkContext; import org.testng.annotations.Test; -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATMOST_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertThrows; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.expectThrows; - /** * Unit test of {@link SinkConfigUtilsTest}. 
*/ @@ -62,6 +63,27 @@ public static class TestSinkConfig { private String configParameter; } + + public static class NopSink implements Sink { + + @Override + public void open(Map config, SinkContext sinkContext) throws Exception { + + } + + @Override + public void write(Record record) throws Exception { + + } + + @Override + public void close() throws Exception { + + } + } + + + @Test public void testAutoAckConvertFailed() throws IOException { @@ -151,6 +173,7 @@ public void testParseRetainOrderingField() throws IOException { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setRetainOrdering(testcase); Function.FunctionDetails functionDetails = SinkConfigUtils.convert(sinkConfig, new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); + assertEquals(functionDetails.getRetainOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); SinkConfig result = SinkConfigUtils.convertFromDetails(functionDetails); assertEquals(result.getRetainOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); } @@ -163,6 +186,7 @@ public void testParseKeyRetainOrderingField() throws IOException { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setRetainKeyOrdering(testcase); Function.FunctionDetails functionDetails = SinkConfigUtils.convert(sinkConfig, new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); + assertEquals(functionDetails.getRetainKeyOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); SinkConfig result = SinkConfigUtils.convertFromDetails(functionDetails); assertEquals(result.getRetainKeyOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); } @@ -519,7 +543,7 @@ private SinkConfig createSinkConfig() { sinkConfig.setNamespace("test-namespace"); sinkConfig.setName("test-sink"); sinkConfig.setParallelism(1); - sinkConfig.setClassName(IdentityFunction.class.getName()); + sinkConfig.setClassName(NopSink.class.getName()); Map inputSpecs = new HashMap<>(); inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(true).serdeClassName("test-serde").build()); sinkConfig.setInputSpecs(inputSpecs); @@ -575,13 +599,16 @@ public void testAllowDisableSinkTimeout() { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setInputSpecs(null); sinkConfig.setTopicsPattern("my-topic-*"); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + LoadedFunctionPackage validatableFunction = + new LoadedFunctionPackage(this.getClass().getClassLoader(), ConnectorDefinition.class); + + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); sinkConfig.setTimeoutMs(null); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); sinkConfig.setTimeoutMs(0L); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java index 49313dbf02c62..a4da4203d9641 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java @@ -285,6 +285,30 @@ public void 
testMergeDifferentBatchSourceConfig() { ); } + @Test + public void testMergeDifferentProducerConfig() { + SourceConfig sourceConfig = createSourceConfig(); + + ProducerConfig producerConfig = new ProducerConfig(); + producerConfig.setMaxPendingMessages(100); + producerConfig.setMaxPendingMessagesAcrossPartitions(1000); + producerConfig.setUseThreadLocalProducers(true); + producerConfig.setBatchBuilder("DEFAULT"); + producerConfig.setCompressionType(CompressionType.ZLIB); + SourceConfig newSourceConfig = createUpdatedSourceConfig("producerConfig", producerConfig); + + SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig); + assertEquals( + mergedConfig.getProducerConfig(), + producerConfig + ); + mergedConfig.setProducerConfig(sourceConfig.getProducerConfig()); + assertEquals( + new Gson().toJson(sourceConfig), + new Gson().toJson(mergedConfig) + ); + } + @Test public void testValidateConfig() { SourceConfig sourceConfig = createSourceConfig(); diff --git a/pulsar-functions/worker/pom.xml b/pulsar-functions/worker/pom.xml index 1201c5aea5d22..d033ce5e628b6 100644 --- a/pulsar-functions/worker/pom.xml +++ b/pulsar-functions/worker/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.0.0-SNAPSHOT .. - pulsar-functions-worker Pulsar Functions :: Worker - - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-functions-runtime ${project.version} - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-admin-original ${project.version} - org.glassfish.jersey.media jersey-media-json-jackson - jakarta.activation jakarta.activation-api - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.media jersey-media-multipart - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.apache.distributedlog distributedlog-core @@ -118,17 +100,14 @@ - org.apache.bookkeeper bookkeeper-server - commons-io commons-io - io.swagger swagger-core @@ -143,14 +122,11 @@ - io.prometheus simpleclient_jetty - - ${project.groupId} pulsar-io-cassandra @@ -158,7 +134,6 @@ pom test - ${project.groupId} pulsar-io-twitter @@ -166,7 +141,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples @@ -174,7 +148,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples-builtin @@ -182,9 +155,7 @@ pom test - - @@ -204,7 +175,6 @@ - maven-dependency-plugin @@ -257,7 +227,6 @@ - org.apache.maven.plugins maven-surefire-plugin @@ -273,7 +242,6 @@ - org.apache.maven.plugins @@ -321,5 +289,4 @@ - diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java index c587a8a734888..217789a1077b1 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java @@ -70,6 +70,7 @@ import org.apache.pulsar.functions.utils.Actions; import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.SourceConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; @Data 
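The FunctionActioner changes below thread a PackageUrlValidator (added later in this patch) through the download path, so file:// and http:// package URLs are rejected unless they point into the configured connectors or functions directory or match an explicitly allowed pattern. A minimal sketch of that check, assuming the WorkerConfig setters exercised in FunctionActionerTest further down and a default-constructed WorkerConfig (illustrative only, not part of the patch):

    // Allow only file URLs under /user/ for both functions and connectors (assumed patterns).
    WorkerConfig workerConfig = new WorkerConfig();
    workerConfig.setAdditionalEnabledFunctionsUrlPatterns(List.of("file:///user/.*"));
    workerConfig.setAdditionalEnabledConnectorUrlPatterns(List.of("file:///user/.*"));

    PackageUrlValidator validator = new PackageUrlValidator(workerConfig);
    // Matches an allowed pattern, so the download is permitted.
    validator.isValidPackageUrl(Function.FunctionDetails.ComponentType.FUNCTION, "file:///user/my-file.jar");  // true
    // Under no enabled directory and matching no pattern, so FunctionActioner throws IllegalArgumentException.
    validator.isValidPackageUrl(Function.FunctionDetails.ComponentType.FUNCTION, "file:///tmp/other.jar");     // false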
@@ -82,18 +83,21 @@ public class FunctionActioner { private final ConnectorsManager connectorsManager; private final FunctionsManager functionsManager; private final PulsarAdmin pulsarAdmin; + private final PackageUrlValidator packageUrlValidator; public FunctionActioner(WorkerConfig workerConfig, RuntimeFactory runtimeFactory, Namespace dlogNamespace, ConnectorsManager connectorsManager, - FunctionsManager functionsManager, PulsarAdmin pulsarAdmin) { + FunctionsManager functionsManager, PulsarAdmin pulsarAdmin, + PackageUrlValidator packageUrlValidator) { this.workerConfig = workerConfig; this.runtimeFactory = runtimeFactory; this.dlogNamespace = dlogNamespace; this.connectorsManager = connectorsManager; this.functionsManager = functionsManager; this.pulsarAdmin = pulsarAdmin; + this.packageUrlValidator = packageUrlValidator; } @@ -151,6 +155,9 @@ private String getPackageFile(FunctionMetaData functionMetaData, FunctionDetails boolean isPkgUrlProvided = isFunctionPackageUrlSupported(packagePath); String packageFile; if (isPkgUrlProvided && packagePath.startsWith(FILE)) { + if (!packageUrlValidator.isValidPackageUrl(componentType, packagePath)) { + throw new IllegalArgumentException("Package URL " + packagePath + " is not valid"); + } URL url = new URL(packagePath); File pkgFile = new File(url.toURI()); packageFile = pkgFile.getAbsolutePath(); @@ -167,7 +174,7 @@ private String getPackageFile(FunctionMetaData functionMetaData, FunctionDetails pkgDir, new File(getDownloadFileName(functionMetaData.getFunctionDetails(), pkgLocation)).getName()); - downloadFile(pkgFile, isPkgUrlProvided, functionMetaData, instanceId, pkgLocation); + downloadFile(pkgFile, isPkgUrlProvided, functionMetaData, instanceId, pkgLocation, componentType); packageFile = pkgFile.getAbsolutePath(); } return packageFile; @@ -225,7 +232,8 @@ InstanceConfig createInstanceConfig(FunctionDetails functionDetails, Function.Fu } private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaData functionMetaData, - int instanceId, Function.PackageLocationMetaData pkgLocation) + int instanceId, Function.PackageLocationMetaData pkgLocation, + FunctionDetails.ComponentType componentType) throws IOException, PulsarAdminException { FunctionDetails details = functionMetaData.getFunctionDetails(); @@ -250,6 +258,9 @@ private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaDa downloadFromHttp ? 
pkgLocationPath : pkgLocation); if (downloadFromHttp) { + if (!packageUrlValidator.isValidPackageUrl(componentType, pkgLocationPath)) { + throw new IllegalArgumentException("Package URL " + pkgLocationPath + " is not valid"); + } FunctionCommon.downloadFromHttpUrl(pkgLocationPath, tempPkgFile); } else if (downloadFromPackageManagementService) { getPulsarAdmin().packages().download(pkgLocationPath, tempPkgFile.getPath()); @@ -526,7 +537,7 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func builder.setClassName(sourceClass); functionDetails.setSource(builder); - fillSourceTypeClass(functionDetails, connector.getClassLoader(), sourceClass); + fillSourceTypeClass(functionDetails, connector.getConnectorFunctionPackage(), sourceClass); return archive; } } @@ -542,7 +553,7 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func builder.setClassName(sinkClass); functionDetails.setSink(builder); - fillSinkTypeClass(functionDetails, connector.getClassLoader(), sinkClass); + fillSinkTypeClass(functionDetails, connector.getConnectorFunctionPackage(), sinkClass); return archive; } } @@ -556,8 +567,8 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func } private void fillSourceTypeClass(FunctionDetails.Builder functionDetails, - ClassLoader narClassLoader, String className) throws ClassNotFoundException { - String typeArg = getSourceType(className, narClassLoader).getName(); + ValidatableFunctionPackage functionPackage, String className) { + String typeArg = getSourceType(className, functionPackage.getTypePool()).asErasure().getName(); SourceSpec.Builder sourceBuilder = SourceSpec.newBuilder(functionDetails.getSource()); sourceBuilder.setTypeClassName(typeArg); @@ -572,8 +583,8 @@ private void fillSourceTypeClass(FunctionDetails.Builder functionDetails, } private void fillSinkTypeClass(FunctionDetails.Builder functionDetails, - ClassLoader narClassLoader, String className) throws ClassNotFoundException { - String typeArg = getSinkType(className, narClassLoader).getName(); + ValidatableFunctionPackage functionPackage, String className) { + String typeArg = getSinkType(className, functionPackage.getTypePool()).asErasure().getName(); SinkSpec.Builder sinkBuilder = SinkSpec.newBuilder(functionDetails.getSink()); sinkBuilder.setTypeClassName(typeArg); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java index 8e6725e93af91..b6e2bbb1ca0f8 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java @@ -220,7 +220,8 @@ public FunctionRuntimeManager(WorkerConfig workerConfig, PulsarWorkerService wor functionAuthProvider, runtimeCustomizer); this.functionActioner = new FunctionActioner(this.workerConfig, runtimeFactory, - dlogNamespace, connectorsManager, functionsManager, workerService.getBrokerAdmin()); + dlogNamespace, connectorsManager, functionsManager, workerService.getBrokerAdmin(), + workerService.getPackageUrlValidator()); this.membershipManager = membershipManager; this.functionMetaDataManager = functionMetaDataManager; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java index 7f035b5562f24..e7816f06aacc8 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java @@ -41,7 +41,7 @@ public class LeaderService implements AutoCloseable, ConsumerEventListener { private ConsumerImpl consumer; private final WorkerConfig workerConfig; private final PulsarClient pulsarClient; - private boolean isLeader = false; + private volatile boolean isLeader = false; static final String COORDINATION_TOPIC_SUBSCRIPTION = "participants"; @@ -172,7 +172,7 @@ public synchronized void becameInactive(Consumer consumer, int partitionId) { } } - public synchronized boolean isLeader() { + public boolean isLeader() { return isLeader; } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java new file mode 100644 index 0000000000000..2a8fe8dddb153 --- /dev/null +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.worker; + +import java.net.URI; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.pulsar.functions.proto.Function; + +/** + * Validates package URLs for functions and connectors. + * Validates that the package URL is either a file in the connectors or functions directory + * when referencing connector or function files is enabled, or matches one of the additional url patterns. 
+ */ +public class PackageUrlValidator { + private final Path connectionsDirectory; + private final Path functionsDirectory; + private final List additionalConnectionsPatterns; + private final List additionalFunctionsPatterns; + + public PackageUrlValidator(WorkerConfig workerConfig) { + this.connectionsDirectory = resolveDirectory(workerConfig.getEnableReferencingConnectorDirectoryFiles(), + workerConfig.getConnectorsDirectory()); + this.functionsDirectory = resolveDirectory(workerConfig.getEnableReferencingFunctionsDirectoryFiles(), + workerConfig.getFunctionsDirectory()); + this.additionalConnectionsPatterns = + compilePatterns(workerConfig.getAdditionalEnabledConnectorUrlPatterns()); + this.additionalFunctionsPatterns = + compilePatterns(workerConfig.getAdditionalEnabledFunctionsUrlPatterns()); + } + + private static Path resolveDirectory(Boolean enabled, String directory) { + return enabled != null && enabled + ? Path.of(directory).normalize().toAbsolutePath() : null; + } + + private static List compilePatterns(List additionalPatterns) { + return additionalPatterns != null ? additionalPatterns.stream().map(Pattern::compile).collect( + Collectors.toList()) : Collections.emptyList(); + } + + boolean isValidFunctionsPackageUrl(URI functionPkgUrl) { + return doesMatch(functionPkgUrl, functionsDirectory, additionalFunctionsPatterns); + } + + boolean isValidConnectionsPackageUrl(URI functionPkgUrl) { + return doesMatch(functionPkgUrl, connectionsDirectory, additionalConnectionsPatterns); + } + + private boolean doesMatch(URI functionPkgUrl, Path directory, List patterns) { + if (directory != null && "file".equals(functionPkgUrl.getScheme())) { + Path filePath = Path.of(functionPkgUrl.getPath()).normalize().toAbsolutePath(); + if (filePath.startsWith(directory)) { + return true; + } + } + String functionPkgUrlString = functionPkgUrl.normalize().toString(); + for (Pattern pattern : patterns) { + if (pattern.matcher(functionPkgUrlString).matches()) { + return true; + } + } + return false; + } + + public boolean isValidPackageUrl(Function.FunctionDetails.ComponentType componentType, String functionPkgUrl) { + URI uri = URI.create(functionPkgUrl); + if (componentType == null) { + // if component type is not specified, we need to check both functions and connections + return isValidFunctionsPackageUrl(uri) || isValidConnectionsPackageUrl(uri); + } + switch (componentType) { + case FUNCTION: + return isValidFunctionsPackageUrl(uri); + case SINK: + case SOURCE: + return isValidConnectionsPackageUrl(uri); + default: + throw new IllegalArgumentException("Unknown component type: " + componentType); + } + } +} diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java index 8a9ea22c53015..84b943e5671ac 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java @@ -119,7 +119,8 @@ public interface PulsarClientCreator { private Sinks sinks; private Sources sources; private Workers workers; - + @Getter + private PackageUrlValidator packageUrlValidator; private final PulsarClientCreator clientCreator; public PulsarWorkerService() { @@ -196,6 +197,7 @@ public void init(WorkerConfig workerConfig, this.sinks = new SinksImpl(() -> PulsarWorkerService.this); this.sources = new SourcesImpl(() -> 
PulsarWorkerService.this); this.workers = new WorkerImpl(() -> PulsarWorkerService.this); + this.packageUrlValidator = new PackageUrlValidator(workerConfig); } @Override @@ -658,6 +660,14 @@ public void stop() { if (statsUpdater != null) { statsUpdater.shutdownNow(); } + + if (null != functionsManager) { + functionsManager.close(); + } + + if (null != connectorsManager) { + connectorsManager.close(); + } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java index 7daa9c9aee8ed..6d07e5870917a 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java @@ -100,6 +100,7 @@ import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.FunctionConfigUtils; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; @@ -1375,11 +1376,17 @@ private StreamingOutput getStreamingOutput(String pkgPath) { private StreamingOutput getStreamingOutput(String pkgPath, FunctionDetails.ComponentType componentType) { return output -> { if (pkgPath.startsWith(Utils.HTTP)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, pkgPath)) { + throw new IllegalArgumentException("Invalid package url: " + pkgPath); + } URL url = URI.create(pkgPath).toURL(); try (InputStream inputStream = url.openStream()) { IOUtils.copy(inputStream, output); } } else if (pkgPath.startsWith(Utils.FILE)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, pkgPath)) { + throw new IllegalArgumentException("Invalid package url: " + pkgPath); + } URI url = URI.create(pkgPath); File file = new File(url.getPath()); Files.copy(file.toPath(), output); @@ -1437,7 +1444,7 @@ public StreamingOutput downloadFunction(final String path, final AuthenticationP } if (worker().getWorkerConfig().isAuthorizationEnabled()) { - // to maintain backwards compatiblity but still have authorization + // to maintain backwards compatibility but still have authorization String[] tokens = path.split("/"); if (tokens.length == 4) { String tenant = tokens[0]; @@ -1788,12 +1795,6 @@ private void internalProcessFunctionRequest(final String tenant, final String na } } - protected ClassLoader getClassLoaderFromPackage(String className, - File packageFile, - String narExtractionDirectory) { - return FunctionCommon.getClassLoaderFromPackage(componentType, className, packageFile, narExtractionDirectory); - } - static File downloadPackageFile(PulsarWorkerService worker, String packageName) throws IOException, PulsarAdminException { Path tempDirectory; @@ -1809,12 +1810,17 @@ static File downloadPackageFile(PulsarWorkerService worker, String packageName) return file; } - protected File getPackageFile(String functionPkgUrl, String existingPackagePath, InputStream uploadedInputStream) + protected File getPackageFile(FunctionDetails.ComponentType componentType, String functionPkgUrl, + String existingPackagePath, InputStream uploadedInputStream) throws IOException, PulsarAdminException { File componentPackageFile = 
null; if (isNotBlank(functionPkgUrl)) { - componentPackageFile = getPackageFile(functionPkgUrl); + componentPackageFile = getPackageFile(componentType, functionPkgUrl); } else if (existingPackagePath.startsWith(Utils.FILE) || existingPackagePath.startsWith(Utils.HTTP)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, functionPkgUrl)) { + throw new IllegalArgumentException("Function Package url is not valid." + + "supported url (http/https/file)"); + } try { componentPackageFile = FunctionCommon.extractFileFromPkgURL(existingPackagePath); } catch (Exception e) { @@ -1822,6 +1828,8 @@ protected File getPackageFile(String functionPkgUrl, String existingPackagePath, + "when getting %s package from %s", e.getMessage(), ComponentTypeUtils.toString(componentType), functionPkgUrl)); } + } else if (Utils.hasPackageTypePrefix(existingPackagePath)) { + componentPackageFile = getPackageFile(componentType, existingPackagePath); } else if (uploadedInputStream != null) { componentPackageFile = WorkerUtils.dumpToTmpFile(uploadedInputStream); } else if (!existingPackagePath.startsWith(Utils.BUILTIN)) { @@ -1839,15 +1847,16 @@ protected File getPackageFile(String functionPkgUrl, String existingPackagePath, return componentPackageFile; } - protected File downloadPackageFile(String packageName) throws IOException, PulsarAdminException { - return downloadPackageFile(worker(), packageName); - } - - protected File getPackageFile(String functionPkgUrl) throws IOException, PulsarAdminException { + protected File getPackageFile(FunctionDetails.ComponentType componentType, String functionPkgUrl) + throws IOException, PulsarAdminException { if (Utils.hasPackageTypePrefix(functionPkgUrl)) { - return downloadPackageFile(functionPkgUrl); + if (!worker().getWorkerConfig().isFunctionsWorkerEnablePackageManagement()) { + throw new IllegalStateException("Function Package management service is disabled. " + + "Please enable it to use " + functionPkgUrl); + } + return downloadPackageFile(worker(), functionPkgUrl); } else { - if (!Utils.isFunctionPackageUrlSupported(functionPkgUrl)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, functionPkgUrl)) { throw new IllegalArgumentException("Function Package url is not valid." 
+ "supported url (http/https/file)"); } @@ -1861,7 +1870,7 @@ protected File getPackageFile(String functionPkgUrl) throws IOException, PulsarA } } - protected ClassLoader getBuiltinFunctionClassLoader(String archive) { + protected ValidatableFunctionPackage getBuiltinFunctionPackage(String archive) { if (!StringUtils.isEmpty(archive)) { if (archive.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN)) { archive = archive.replaceFirst("^builtin://", ""); @@ -1871,7 +1880,7 @@ protected ClassLoader getBuiltinFunctionClassLoader(String archive) { if (function == null) { throw new IllegalArgumentException("Built-in " + componentType + " is not available"); } - return function.getClassLoader(); + return function.getFunctionPackage(); } } return null; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java index c7967600da5f6..a075d3e18a0b3 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java @@ -47,7 +47,6 @@ import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.FunctionStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; @@ -55,10 +54,14 @@ import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.utils.ComponentTypeUtils; import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.FunctionsManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Functions; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -141,7 +144,7 @@ public void registerFunction(final String tenant, // validate parameters try { if (isNotBlank(functionPkgUrl)) { - componentPackageFile = getPackageFile(functionPkgUrl); + componentPackageFile = getPackageFile(componentType, functionPkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, functionName, functionConfig, componentPackageFile); } else { @@ -289,7 +292,8 @@ public void updateFunction(final String tenant, throw new RestException(Response.Status.BAD_REQUEST, e.getMessage()); } - if (existingFunctionConfig.equals(mergedConfig) && isBlank(functionPkgUrl) && uploadedInputStream == null) { + if (existingFunctionConfig.equals(mergedConfig) && isBlank(functionPkgUrl) && uploadedInputStream == null + && (updateOptions == null || !updateOptions.isUpdateAuthData())) { log.error("{}/{}/{} Update contains no changes", tenant, namespace, functionName); throw new RestException(Response.Status.BAD_REQUEST, "Update contains no 
change"); } @@ -301,6 +305,7 @@ public void updateFunction(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, functionPkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -374,8 +379,10 @@ public void updateFunction(final String tenant, Function.PackageLocationMetaData.Builder packageLocationMetaDataBuilder; if (isNotBlank(functionPkgUrl) || uploadedInputStream != null) { + Function.FunctionMetaData metaData = functionMetaDataBuilder.build(); + metaData = FunctionMetaDataUtils.incrMetadataVersion(metaData, metaData); try { - packageLocationMetaDataBuilder = getFunctionPackageLocation(functionMetaDataBuilder.build(), + packageLocationMetaDataBuilder = getFunctionPackageLocation(metaData, functionPkgUrl, fileDetail, componentPackageFile); } catch (Exception e) { log.error("Failed process {} {}/{}/{} package: ", ComponentTypeUtils.toString(componentType), @@ -728,11 +735,12 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant functionConfig.setTenant(tenant); functionConfig.setNamespace(namespace); functionConfig.setName(componentName); + WorkerConfig workerConfig = worker().getWorkerConfig(); FunctionConfigUtils.inferMissingArguments( - functionConfig, worker().getWorkerConfig().isForwardSourceMessageProperty()); + functionConfig, workerConfig.isForwardSourceMessageProperty()); String archive = functionConfig.getJar(); - ClassLoader classLoader = null; + ValidatableFunctionPackage functionPackage = null; // check if function is builtin and extract classloader if (!StringUtils.isEmpty(archive)) { if (archive.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN)) { @@ -745,35 +753,38 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (function == null) { throw new IllegalArgumentException(String.format("No Function %s found", archive)); } - classLoader = function.getClassLoader(); + functionPackage = function.getFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; try { - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { // if function is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && componentPackageFile != null) { - classLoader = getClassLoaderFromPackage(functionConfig.getClassName(), - componentPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + if (functionPackage == null && componentPackageFile != null) { + functionPackage = + new FunctionFilePackage(componentPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), FunctionDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (functionPackage == null) { throw new IllegalArgumentException("Function package is not provided"); } FunctionConfigUtils.ExtractedFunctionDetails functionDetails = FunctionConfigUtils.validateJavaFunction( - functionConfig, classLoader); + functionConfig, functionPackage); return FunctionConfigUtils.convert(functionConfig, functionDetails); } else { - classLoader = FunctionConfigUtils.validate(functionConfig, componentPackageFile); - shouldCloseClassLoader = true; - return FunctionConfigUtils.convert(functionConfig, classLoader); + FunctionConfigUtils.validateNonJavaFunction(functionConfig); + return FunctionConfigUtils.convert(functionConfig); } } finally { - if 
(shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if (shouldCloseFunctionPackage && functionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) functionPackage).close(); + } catch (Exception e) { + log.error("Failed to close function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java index 98450c4a0b5d0..6b8b41e5a8e5b 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java @@ -47,17 +47,20 @@ import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SinkStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.utils.ComponentTypeUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; import org.apache.pulsar.functions.utils.SinkConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sinks; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -140,7 +143,7 @@ public void registerSink(final String tenant, // validate parameters try { if (isNotBlank(sinkPkgUrl)) { - componentPackageFile = getPackageFile(sinkPkgUrl); + componentPackageFile = getPackageFile(componentType, sinkPkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, sinkName, sinkConfig, componentPackageFile); } else { @@ -294,7 +297,8 @@ public void updateSink(final String tenant, throw new RestException(Response.Status.BAD_REQUEST, e.getMessage()); } - if (existingSinkConfig.equals(mergedConfig) && isBlank(sinkPkgUrl) && uploadedInputStream == null) { + if (existingSinkConfig.equals(mergedConfig) && isBlank(sinkPkgUrl) && uploadedInputStream == null + && (updateOptions == null || !updateOptions.isUpdateAuthData())) { log.error("{}/{}/{} Update contains no changes", tenant, namespace, sinkName); throw new RestException(Response.Status.BAD_REQUEST, "Update contains no change"); } @@ -306,6 +310,7 @@ public void updateSink(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, sinkPkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -378,8 +383,10 @@ public void updateSink(final String tenant, Function.PackageLocationMetaData.Builder packageLocationMetaDataBuilder; if (isNotBlank(sinkPkgUrl) || uploadedInputStream != null) { + Function.FunctionMetaData metaData = functionMetaDataBuilder.build(); + metaData = FunctionMetaDataUtils.incrMetadataVersion(metaData, 
metaData); try { - packageLocationMetaDataBuilder = getFunctionPackageLocation(functionMetaDataBuilder.build(), + packageLocationMetaDataBuilder = getFunctionPackageLocation(metaData, sinkPkgUrl, fileDetail, componentPackageFile); } catch (Exception e) { log.error("Failed process {} {}/{}/{} package: ", ComponentTypeUtils.toString(componentType), @@ -415,7 +422,8 @@ private void setTransformFunctionPackageLocation(Function.FunctionMetaData.Build try { String builtin = functionDetails.getBuiltin(); if (isBlank(builtin)) { - functionPackageFile = getPackageFile(transformFunction); + functionPackageFile = + getPackageFile(Function.FunctionDetails.ComponentType.FUNCTION, transformFunction); } Function.PackageLocationMetaData.Builder functionPackageLocation = getFunctionPackageLocation(functionMetaDataBuilder.build(), @@ -700,7 +708,7 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant sinkConfig.setName(sinkName); org.apache.pulsar.common.functions.Utils.inferMissingArguments(sinkConfig); - ClassLoader classLoader = null; + ValidatableFunctionPackage connectorFunctionPackage = null; // check if sink is builtin and extract classloader if (!StringUtils.isEmpty(sinkConfig.getArchive())) { String archive = sinkConfig.getArchive(); @@ -712,45 +720,63 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (connector == null) { throw new IllegalArgumentException("Built-in sink is not available"); } - classLoader = connector.getClassLoader(); + connectorFunctionPackage = connector.getConnectorFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; + ValidatableFunctionPackage transformFunctionPackage = null; + boolean shouldCloseTransformFunctionPackage = false; try { // if sink is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && sinkPackageFile != null) { - classLoader = getClassLoaderFromPackage(sinkConfig.getClassName(), - sinkPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + WorkerConfig workerConfig = worker().getWorkerConfig(); + if (connectorFunctionPackage == null && sinkPackageFile != null) { + connectorFunctionPackage = + new FunctionFilePackage(sinkPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), ConnectorDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (connectorFunctionPackage == null) { throw new IllegalArgumentException("Sink package is not provided"); } - ClassLoader functionClassLoader = null; if (isNotBlank(sinkConfig.getTransformFunction())) { - functionClassLoader = - getBuiltinFunctionClassLoader(sinkConfig.getTransformFunction()); - if (functionClassLoader == null) { - File functionPackageFile = getPackageFile(sinkConfig.getTransformFunction()); - functionClassLoader = getClassLoaderFromPackage(sinkConfig.getTransformFunctionClassName(), - functionPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); + transformFunctionPackage = + getBuiltinFunctionPackage(sinkConfig.getTransformFunction()); + if (transformFunctionPackage == null) { + File functionPackageFile = getPackageFile(Function.FunctionDetails.ComponentType.FUNCTION, + sinkConfig.getTransformFunction()); + transformFunctionPackage = + new FunctionFilePackage(functionPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), 
ConnectorDefinition.class); + shouldCloseTransformFunctionPackage = true; } - if (functionClassLoader == null) { + if (transformFunctionPackage == null) { throw new IllegalArgumentException("Transform Function package not found"); } } - SinkConfigUtils.ExtractedSinkDetails sinkDetails = SinkConfigUtils.validateAndExtractDetails( - sinkConfig, classLoader, functionClassLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + SinkConfigUtils.ExtractedSinkDetails sinkDetails = + SinkConfigUtils.validateAndExtractDetails(sinkConfig, connectorFunctionPackage, + transformFunctionPackage, workerConfig.getValidateConnectorConfig()); return SinkConfigUtils.convert(sinkConfig, sinkDetails); } finally { - if (shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if (shouldCloseFunctionPackage && connectorFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) connectorFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to connector function file", e); + } + } + if (shouldCloseTransformFunctionPackage && transformFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) transformFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to close transform function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java index 876c7e7572e78..5191306146951 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java @@ -46,17 +46,20 @@ import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SourceStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.utils.ComponentTypeUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; import org.apache.pulsar.functions.utils.SourceConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sources; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -140,7 +143,7 @@ public void registerSource(final String tenant, // validate parameters try { if (isPkgUrlProvided) { - componentPackageFile = getPackageFile(sourcePkgUrl); + componentPackageFile = getPackageFile(componentType, sourcePkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, sourceName, sourceConfig, componentPackageFile); } else { @@ -288,7 +291,8 @@ public void updateSource(final String tenant, throw new RestException(Response.Status.BAD_REQUEST, e.getMessage()); } - if 
(existingSourceConfig.equals(mergedConfig) && isBlank(sourcePkgUrl) && uploadedInputStream == null) { + if (existingSourceConfig.equals(mergedConfig) && isBlank(sourcePkgUrl) && uploadedInputStream == null + && (updateOptions == null || !updateOptions.isUpdateAuthData())) { log.error("{}/{}/{} Update contains no changes", tenant, namespace, sourceName); throw new RestException(Response.Status.BAD_REQUEST, "Update contains no change"); } @@ -300,6 +304,7 @@ public void updateSource(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, sourcePkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -372,8 +377,10 @@ public void updateSource(final String tenant, Function.PackageLocationMetaData.Builder packageLocationMetaDataBuilder; if (isNotBlank(sourcePkgUrl) || uploadedInputStream != null) { + Function.FunctionMetaData metaData = functionMetaDataBuilder.build(); + metaData = FunctionMetaDataUtils.incrMetadataVersion(metaData, metaData); try { - packageLocationMetaDataBuilder = getFunctionPackageLocation(functionMetaDataBuilder.build(), + packageLocationMetaDataBuilder = getFunctionPackageLocation(metaData, sourcePkgUrl, fileDetail, componentPackageFile); } catch (Exception e) { log.error("Failed process {} {}/{}/{} package: ", ComponentTypeUtils.toString(componentType), @@ -659,7 +666,7 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant sourceConfig.setName(sourceName); org.apache.pulsar.common.functions.Utils.inferMissingArguments(sourceConfig); - ClassLoader classLoader = null; + ValidatableFunctionPackage connectorFunctionPackage = null; // check if source is builtin and extract classloader if (!StringUtils.isEmpty(sourceConfig.getArchive())) { String archive = sourceConfig.getArchive(); @@ -671,30 +678,37 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (connector == null) { throw new IllegalArgumentException("Built-in source is not available"); } - classLoader = connector.getClassLoader(); + connectorFunctionPackage = connector.getConnectorFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; try { // if source is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && sourcePackageFile != null) { - classLoader = getClassLoaderFromPackage(sourceConfig.getClassName(), - sourcePackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + WorkerConfig workerConfig = worker().getWorkerConfig(); + if (connectorFunctionPackage == null && sourcePackageFile != null) { + connectorFunctionPackage = + new FunctionFilePackage(sourcePackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), ConnectorDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (connectorFunctionPackage == null) { throw new IllegalArgumentException("Source package is not provided"); } SourceConfigUtils.ExtractedSourceDetails sourceDetails = SourceConfigUtils.validateAndExtractDetails( - sourceConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + sourceConfig, connectorFunctionPackage, + workerConfig.getValidateConnectorConfig()); return SourceConfigUtils.convert(sourceConfig, sourceDetails); } finally { - if (shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if 
(shouldCloseFunctionPackage && connectorFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) connectorFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to connector function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/resources/findbugsExclude.xml b/pulsar-functions/worker/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-functions/worker/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/worker/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java index 4e4c3d2f234aa..ac5ca617ea43b 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java @@ -30,6 +30,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.AssertJUnit.fail; +import java.util.List; import java.util.Map; import java.util.Optional; import org.apache.distributedlog.api.namespace.Namespace; @@ -77,7 +78,8 @@ public void testStartFunctionWithDLNamespace() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + mock(PackageUrlValidator.class)); Function.FunctionMetaData function1 = Function.FunctionMetaData.newBuilder() .setFunctionDetails(Function.FunctionDetails.newBuilder().setTenant("test-tenant") .setNamespace("test-namespace").setName("func-1")) @@ -109,6 +111,8 @@ public void testStartFunctionWithPkgUrl() throws Exception { workerConfig.setPulsarServiceUrl("pulsar://localhost:6650"); workerConfig.setStateStorageServiceUrl("foo"); workerConfig.setFunctionAssignmentTopicName("assignments"); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(List.of("file:///user/.*", "http://invalid/.*")); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(List.of("file:///user/.*", "http://invalid/.*")); String downloadDir = this.getClass().getProtectionDomain().getCodeSource().getLocation().getPath(); workerConfig.setDownloadDirectory(downloadDir); @@ -122,11 +126,12 @@ public void testStartFunctionWithPkgUrl() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + new PackageUrlValidator(workerConfig)); // (1) test with file url. 
functionActioner should be able to consider file-url and it should be able to call // RuntimeSpawner - String pkgPathLocation = FILE + ":/user/my-file.jar"; + String pkgPathLocation = FILE + ":///user/my-file.jar"; startFunction(actioner, pkgPathLocation, pkgPathLocation); verify(runtime, times(1)).start(); @@ -194,7 +199,8 @@ public void testFunctionAuthDisabled() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + mock(PackageUrlValidator.class)); String pkgPathLocation = "http://invalid/my-file.jar"; @@ -257,7 +263,8 @@ public void testStartFunctionWithPackageUrl() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), pulsarAdmin); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), pulsarAdmin, + mock(PackageUrlValidator.class)); // (1) test with file url. functionActioner should be able to consider file-url and it should be able to call // RuntimeSpawner diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java index bc56b1766d39d..7b57a1c896539 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java @@ -720,7 +720,7 @@ public void testExternallyManagedRuntimeUpdate() throws Exception { FunctionActioner functionActioner = spy(new FunctionActioner( workerConfig, - kubernetesRuntimeFactory, null, null, null, null)); + kubernetesRuntimeFactory, null, null, null, null, workerService.getPackageUrlValidator())); try (final MockedStatic runtimeFactoryMockedStatic = Mockito .mockStatic(RuntimeFactory.class);) { diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java index 8da24fd1b7250..5c10a59bd1388 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java @@ -36,6 +36,7 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; @@ -76,7 +77,8 @@ public LeaderServiceTest() { @BeforeMethod public void setup() throws PulsarClientException { mockClient = mock(PulsarClientImpl.class); - + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); mockConsumer = mock(ConsumerImpl.class); ConsumerBuilder mockConsumerBuilder = mock(ConsumerBuilder.class); diff --git 
a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java index e066bb24e6ef0..ac3176b3135e2 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java @@ -41,6 +41,7 @@ import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.ReaderBuilder; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.functions.WorkerInfo; @@ -68,7 +69,8 @@ public MembershipManagerTest() { private static PulsarClient mockPulsarClient() throws PulsarClientException { PulsarClientImpl mockClient = mock(PulsarClientImpl.class); - + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); ConsumerImpl mockConsumer = mock(ConsumerImpl.class); ConsumerBuilder mockConsumerBuilder = mock(ConsumerBuilder.class); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java index cfe087c78406a..73a229893e5d2 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java @@ -381,6 +381,6 @@ public static FunctionConfig createDefaultFunctionConfig() { public static Function.FunctionDetails createDefaultFunctionDetails() { FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + return FunctionConfigUtils.convert(functionConfig); } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java index 32a104c576993..a8415919c119b 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java @@ -18,1125 +18,82 @@ */ package org.apache.pulsar.functions.worker.rest.api.v2; - -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.apache.pulsar.functions.utils.FunctionCommon.mergeJson; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import com.google.common.collect.Lists; import com.google.gson.Gson; -import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.util.JsonFormat; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import 
java.io.InputStream; import java.io.OutputStream; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; -import org.apache.distributedlog.api.namespace.Namespace; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.PulsarAdmin; -import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; import org.apache.pulsar.common.functions.FunctionConfig; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.Context; -import org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.instance.InstanceUtils; -import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.proto.Function.PackageLocationMetaData; -import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; -import org.apache.pulsar.functions.proto.Function.SinkSpec; -import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.proto.Function.SubscriptionType; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.source.TopicSchema; -import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.utils.FunctionConfigUtils; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; -import org.apache.pulsar.functions.worker.WorkerConfig; -import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.FunctionsImpl; import org.apache.pulsar.functions.worker.rest.api.FunctionsImplV2; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; +import org.apache.pulsar.functions.worker.rest.api.v3.AbstractFunctionApiResourceTest; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -/** - * Unit test of {@link FunctionsApiV2Resource}. 
- */ -public class FunctionApiV2ResourceTest { - - private static final class TestFunction implements Function { - - @Override - public String process(String input, Context context) { - return input; - } - } - - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; - private static final String function = "test-function"; - private static final String outputTopic = "test-output-topic"; - private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; - private static final String className = TestFunction.class.getName(); - private SubscriptionType subscriptionType = SubscriptionType.FAILOVER; - private static final Map topicsToSerDeClassName = new HashMap<>(); - static { - topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); - } - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; +public class FunctionApiV2ResourceTest extends AbstractFunctionApiResourceTest { private FunctionsImplV2 resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetadata; - private LeaderService mockedLeaderService; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedFunctionMetadata = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - 
when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); - - FunctionsImpl functions = spy(new FunctionsImpl(() -> mockedWorkerService)); - - this.resource = spy(new FunctionsImplV2(functions)); - - } - - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); - } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); - } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - private void mockWorkerUtils() { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); - } - - private void mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.FUNCTION); - }); - } - - // - // Register Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testRegisterFunctionMissingTenant() { - try { - testRegisterFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testRegisterFunctionMissingNamespace() { - try { - testRegisterFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testRegisterFunctionMissingFunctionName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - 
outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not provided") - public void testRegisterFunctionMissingPackage() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) specified for the function") - public void testRegisterFunctionMissingInputTopics() throws Exception { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function Package is not provided") - public void testRegisterFunctionMissingPackageDetails() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package does not have" - + " the correct format. Pulsar cannot determine if the package is a NAR package or JAR package. 
Function " - + "classname is not provided and attempts to load it as a NAR package produced the following error.*") - public void testRegisterFunctionMissingClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass must be in class path") - public void testRegisterFunctionWrongClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - "UnknownClass", - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a positive number") - public void testRegisterFunctionWrongParallelism() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - -2, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, - expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used as an input topic \\(topics must be one or the other\\)") - public void testRegisterFunctionSameInputOutput() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - topicsToSerDeClassName.keySet().iterator().next(), - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + - "-output-topic/test:" + " is invalid") - public void testRegisterFunctionWrongOutputTopic() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - function + "-output-topic/test:", - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. 
when getting Function package from .*") - public void testRegisterFunctionHttpUrl() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "http://localhost:1234/test"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testRegisterFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String functionPkgUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenCallRealMethod(); - }); - - try { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - details, - functionPkgUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - private void registerDefaultFunction() { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - try { - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already exists") - public void testRegisterExistedFunction() { - try { - Configurator.setRootLevel(Level.DEBUG); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testRegisterFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new IOException("upload failure")); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testRegisterFunctionSuccess() throws Exception { - try { - try (MockedStatic mocked = Mockito.mockStatic(WorkerUtils.class)) { - mocked.when(() -> WorkerUtils.uploadToBookKeeper( - any(Namespace.class), - any(InputStream.class), - anyString())).thenAnswer((i) -> null); - mocked.when(() -> WorkerUtils.dumpToTmpFile(any())).thenAnswer(i -> - { - try { - File tmpFile = FunctionCommon.createPkgTempFile(); - tmpFile.deleteOnExit(); - Files.copy((InputStream) i.getArguments()[0], tmpFile.toPath(), REPLACE_EXISTING); - return tmpFile; - } catch (IOException e) { - throw new RuntimeException("Cannot create a temporary file", e); - } - - } - ); - WorkerUtils.uploadToBookKeeper(null, null, null); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") - public void testRegisterFunctionNonExistingNamespace() { - try { - this.namespaceList.clear(); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") - public void testRegisterFunctionNonexistantTenant() throws Exception { - try { - when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testRegisterFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registration interrupted") - public void testRegisterFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalStateException("Function registration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Update Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testUpdateFunctionMissingTenant() throws Exception { - try { - testUpdateFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - 
mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Tenant is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testUpdateFunctionMissingNamespace() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Namespace is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testUpdateFunctionMissingFunctionName() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Function name is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingPackage() throws Exception { - try { - mockWorkerUtils(); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingInputTopic() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingClassName() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedParallelism() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism + 1, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + @Override + protected void doSetup() { + super.doSetup(); + 
this.resource = spy(new FunctionsImplV2(() -> mockedWorkerService)); } - @Test - public void testUpdateFunctionChangedInputs() throws Exception { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( + protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + FormDataContentDisposition details, String functionPkgUrl, + FunctionConfig functionConfig) throws IOException { + resource.registerFunction( tenant, namespace, function, - null, - topicsToSerDeClassName, - mockedFormData, - "DifferentOutput", - outputSerdeClassName, - null, - parallelism, + inputStream, + details, + functionPkgUrl, + JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig)), null); } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") - public void testUpdateFunctionChangedOutput() throws Exception { - try { - mockWorkerUtils(); - - Map someOtherInput = new HashMap<>(); - someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - someOtherInput, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Input Topics cannot be altered"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testUpdateFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String expectedError) throws Exception { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - if (expectedError != null) { - doThrow(new IllegalArgumentException(expectedError)) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - } - - try { - resource.updateFunction( - tenant, - namespace, - function, - inputStream, - details, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - private void updateDefaultFunction() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - 
functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - - try { - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testUpdateNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testUpdateFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new IOException("upload failure")); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testUpdateFunctionSuccess() throws Exception { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } - - @Test - public void testUpdateFunctionWithUrl() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - try { - resource.updateFunction( - tenant, - namespace, - function, - null, - null, - filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testUpdateFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - updateDefaultFunction(); - } 
catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registeration interrupted") - public void testUpdateFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function registeration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // deregister function - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testDeregisterFunctionMissingTenant() { - try { - - testDeregisterFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) throws IOException { + resource.updateFunction(tenant, namespace, functionName, uploadedInputStream, fileDetail, functionPkgUrl, + JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig)), authParams); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testDeregisterFunctionMissingNamespace() { - try { - testDeregisterFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; + protected File downloadFunction(final String path, final AuthenticationParameters authParams) + throws IOException { + Response response = resource.downloadFunction(path, authParams); + StreamingOutput streamingOutput = readEntity(response, StreamingOutput.class); + File pkgFile = File.createTempFile("testpkg", "nar"); + try (OutputStream output = new FileOutputStream(pkgFile)) { + streamingOutput.write(output); } + return pkgFile; } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testDeregisterFunctionMissingFunctionName() { - try { - testDeregisterFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + private T readEntity(Response response, Class clazz) { + return clazz.cast(response.getEntity()); } - private void testDeregisterFunctionMissingArguments( + protected void testDeregisterFunctionMissingArguments( String tenant, String namespace, String function @@ -1145,112 +102,18 @@ private void testDeregisterFunctionMissingArguments( tenant, namespace, function, - AuthenticationParameters.builder().build()); + null); } - private void deregisterDefaultFunction() { + protected void deregisterDefaultFunction() { resource.deregisterFunction( tenant, namespace, function, - AuthenticationParameters.builder().build()); - } - - 
@Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testDeregisterNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testDeregisterFunctionSuccess() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - deregisterDefaultFunction(); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") - public void testDeregisterFunctionFailure() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to deregister")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration interrupted") - public void testDeregisterFunctionInterrupted() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function deregisteration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Get Function Info - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testGetFunctionMissingTenant() throws IOException { - try { - testGetFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testGetFunctionMissingNamespace() throws IOException { - try { - testGetFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testGetFunctionMissingFunctionName() throws IOException { - try { - testGetFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + null); } - private void testGetFunctionMissingArguments( + protected void testGetFunctionMissingArguments( String tenant, String namespace, String function @@ -1258,20 +121,36 @@ private void testGetFunctionMissingArguments( resource.getFunctionInfo( tenant, namespace, - function, - AuthenticationParameters.builder().build() + function, null + ); + } + + protected void testListFunctionsMissingArguments( + String tenant, + String namespace + ) { + 
resource.listFunctions( + tenant, + namespace, null ); } - private FunctionDetails getDefaultFunctionInfo() throws IOException { + protected List listDefaultFunctions() { + return new Gson().fromJson(readEntity(resource.listFunctions( + tenant, + namespace, null + ), String.class), List.class); + } + + private Function.FunctionDetails getDefaultFunctionInfo() throws IOException { String json = (String) resource.getFunctionInfo( tenant, namespace, function, AuthenticationParameters.builder().build() ).getEntity(); - FunctionDetails.Builder functionDetailsBuilder = FunctionDetails.newBuilder(); + Function.FunctionDetails.Builder functionDetailsBuilder = Function.FunctionDetails.newBuilder(); mergeJson(json, functionDetailsBuilder); return functionDetailsBuilder.build(); } @@ -1292,218 +171,31 @@ public void testGetFunctionSuccess() throws IOException { mockInstanceUtils(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - SinkSpec sinkSpec = SinkSpec.newBuilder() + Function.SinkSpec sinkSpec = Function.SinkSpec.newBuilder() .setTopic(outputTopic) .setSerDeClassName(outputSerdeClassName).build(); - FunctionDetails functionDetails = FunctionDetails.newBuilder() + Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder() .setClassName(className) .setSink(sinkSpec) .setName(function) .setNamespace(namespace) - .setProcessingGuarantees(ProcessingGuarantees.ATMOST_ONCE) + .setProcessingGuarantees(Function.ProcessingGuarantees.ATMOST_ONCE) .setAutoAck(true) .setTenant(tenant) .setParallelism(parallelism) - .setSource(SourceSpec.newBuilder().setSubscriptionType(subscriptionType) + .setSource(Function.SourceSpec.newBuilder().setSubscriptionType(subscriptionType) .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); - FunctionMetaData metaData = FunctionMetaData.newBuilder() + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() .setCreateTime(System.currentTimeMillis()) .setFunctionDetails(functionDetails) - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) .setVersion(1234) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionDetails actual = getDefaultFunctionInfo(); + Function.FunctionDetails actual = getDefaultFunctionInfo(); assertEquals( functionDetails, actual); } - - // - // List Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testListFunctionsMissingTenant() { - try { - testListFunctionsMissingArguments( - null, - namespace - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testListFunctionsMissingNamespace() { - try { - testListFunctionsMissingArguments( - tenant, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testListFunctionsMissingArguments( - String tenant, - String namespace - ) { - resource.listFunctions( - tenant, - namespace, - AuthenticationParameters.builder().build() - ); - - } - - private List listDefaultFunctions() { - return new 
Gson().fromJson((String) resource.listFunctions( - tenant, - namespace, - AuthenticationParameters.builder().build() - ).getEntity(), List.class); - } - - @Test - public void testListFunctionsSuccess() { - mockInstanceUtils(); - final List functions = Lists.newArrayList("test-1", "test-2"); - final List metaDataList = new LinkedList<>(); - FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-1").build() - ).build(); - FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-2").build() - ).build(); - metaDataList.add(functionMetaData1); - metaDataList.add(functionMetaData2); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); - } - - @Test - public void testDownloadFunctionHttpUrl() throws Exception { - String jarHttpUrl = - "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - String testDir = FunctionApiV2ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - FunctionsImplV2 function = new FunctionsImplV2(() -> mockedWorkerService); - StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction(jarHttpUrl, - AuthenticationParameters.builder().build()).getEntity(); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } - } - - @Test - public void testDownloadFunctionFile() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String testDir = FunctionApiV2ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - FunctionsImplV2 function = new FunctionsImplV2(() -> mockedWorkerService); - StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction("file:///" + fileLocation, - AuthenticationParameters.builder().build()).getEntity(); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } - } - - @Test - public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - try { - 
resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - @Test - public void testRegisterFunctionWithConflictingFields() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - String actualTenant = "DIFFERENT_TENANT"; - String actualNamespace = "DIFFERENT_NAMESPACE"; - String actualName = "DIFFERENT_NAME"; - this.namespaceList.add(actualTenant + "/" + actualNamespace); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - try { - resource.registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - } - - public static FunctionConfig createDefaultFunctionConfig() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - return functionConfig; - } - - public static FunctionDetails createDefaultFunctionDetails() { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); - } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java new file mode 100644 index 0000000000000..388331ce6f241 --- /dev/null +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java @@ -0,0 +1,1374 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.functions.worker.rest.api.v3; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import com.google.common.collect.Lists; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import javax.ws.rs.core.Response; +import org.apache.distributedlog.api.namespace.Namespace; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.pulsar.broker.authentication.AuthenticationParameters; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.RestException; +import org.apache.pulsar.functions.api.Context; +import org.apache.pulsar.functions.api.Function; +import org.apache.pulsar.functions.proto.Function.FunctionDetails; +import org.apache.pulsar.functions.proto.Function.FunctionMetaData; +import org.apache.pulsar.functions.proto.Function.SubscriptionType; +import org.apache.pulsar.functions.source.TopicSchema; +import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.worker.WorkerConfig; +import org.apache.pulsar.functions.worker.WorkerUtils; +import org.glassfish.jersey.media.multipart.FormDataContentDisposition; +import org.mockito.Mockito; +import org.testng.Assert; +import org.testng.annotations.Test; + +public abstract class AbstractFunctionApiResourceTest extends AbstractFunctionsResourceTest { + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().contains("Upload")) { + workerConfig.setFunctionsWorkerEnablePackageManagement(false); + } + } + + @Test + public void testListFunctionsSuccess() { + mockInstanceUtils(); + final List functions = Lists.newArrayList("test-1", "test-2"); + final List metaDataList = new LinkedList<>(); + FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder().setName("test-1").build() + ).build(); + FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder().setName("test-2").build() + ).build(); + metaDataList.add(functionMetaData1); + metaDataList.add(functionMetaData2); + when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); + + List functionList = listDefaultFunctions(); + assertEquals(functions, 
functionList); + } + + @Test + public void testOnlyGetSources() { + List functions = Lists.newArrayList("test-2"); + List functionMetaDataList = new LinkedList<>(); + FunctionMetaData f1 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-1") + .setComponentType(FunctionDetails.ComponentType.SOURCE) + .build()).build(); + functionMetaDataList.add(f1); + FunctionMetaData f2 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-2") + .setComponentType(FunctionDetails.ComponentType.FUNCTION) + .build()).build(); + functionMetaDataList.add(f2); + FunctionMetaData f3 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-3") + .setComponentType(FunctionDetails.ComponentType.SINK) + .build()).build(); + functionMetaDataList.add(f3); + when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(functionMetaDataList); + + List functionList = listDefaultFunctions(); + assertEquals(functions, functionList); + } + + private static final class TestFunction implements Function { + + @Override + public String process(String input, Context context) { + return input; + } + } + + private static final class WrongFunction implements Consumer { + @Override + public void accept(String s) { + + } + } + + protected static final String function = "test-function"; + protected static final String outputTopic = "test-output-topic"; + protected static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; + protected static final String className = TestFunction.class.getName(); + protected SubscriptionType subscriptionType = SubscriptionType.FAILOVER; + protected FunctionMetaData mockedFunctionMetadata; + + + @Override + protected void doSetup() { + this.mockedFunctionMetadata = + FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); + } + + @Override + protected FunctionDetails.ComponentType getComponentType() { + return FunctionDetails.ComponentType.FUNCTION; + } + + + abstract protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + FormDataContentDisposition details, String functionPkgUrl, FunctionConfig functionConfig) + throws IOException; + abstract protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) throws IOException; + + abstract protected File downloadFunction(final String path, final AuthenticationParameters authParams) + throws IOException; + + abstract protected void testDeregisterFunctionMissingArguments( + String tenant, + String namespace, + String function + ); + + abstract protected void deregisterDefaultFunction(); + + abstract protected void testGetFunctionMissingArguments( + String tenant, + String namespace, + String function + ) throws IOException; + + abstract protected void testListFunctionsMissingArguments( + String tenant, + String namespace + ); + + abstract protected List listDefaultFunctions(); + + // + // Register Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testRegisterFunctionMissingTenant() throws IOException 
{ + try { + testRegisterFunctionMissingArguments( + null, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testRegisterFunctionMissingNamespace() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + null, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testRegisterFunctionMissingFunctionName() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + null, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not " + + "provided") + public void testRegisterFunctionMissingPackage() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) " + + "specified for the function") + public void testRegisterFunctionMissingInputTopics() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + null, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function Package is not " + + "provided") + public void testRegisterFunctionMissingPackageDetails() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + null, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, + expectedExceptionsMessageRegExp = "Function class name is not provided.") + public void testRegisterFunctionMissingClassName() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + null); + } catch (RestException re) { + 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass " + + "must be in class path") + public void testRegisterFunctionWrongClassName() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + "UnknownClass", + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a" + + " positive number") + public void testRegisterFunctionWrongParallelism() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + -2, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, + expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used " + + "as an input topic \\(topics must be one or the other\\)") + public void testRegisterFunctionSameInputOutput() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + topicsToSerDeClassName.keySet().iterator().next(), + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + + "-output-topic/test:" + " is invalid") + public void testRegisterFunctionWrongOutputTopic() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + function + "-output-topic/test:", + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. when " + + "getting Function package from .*") + public void testRegisterFunctionHttpUrl() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + null, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "http://localhost:1234/test"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class .*. 
does not " + + "implement the correct interface") + public void testRegisterFunctionImplementWrongInterface() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + WrongFunction.class.getName(), + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + private void testRegisterFunctionMissingArguments( + String tenant, + String namespace, + String function, + InputStream inputStream, + Map topicsToSerDeClassName, + FormDataContentDisposition details, + String outputTopic, + String outputSerdeClassName, + String className, + Integer parallelism, + String functionPkgUrl) throws IOException { + FunctionConfig functionConfig = new FunctionConfig(); + if (tenant != null) { + functionConfig.setTenant(tenant); + } + if (namespace != null) { + functionConfig.setNamespace(namespace); + } + if (function != null) { + functionConfig.setName(function); + } + if (topicsToSerDeClassName != null) { + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + } + if (outputTopic != null) { + functionConfig.setOutput(outputTopic); + } + if (outputSerdeClassName != null) { + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + } + if (className != null) { + functionConfig.setClassName(className); + } + if (parallelism != null) { + functionConfig.setParallelism(parallelism); + } + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + + registerFunction(tenant, namespace, function, inputStream, details, functionPkgUrl, functionConfig); + + } + + @Test(expectedExceptions = Exception.class, expectedExceptionsMessageRegExp = "Function config is not provided") + public void testUpdateMissingFunctionConfig() throws IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateFunction( + tenant, + namespace, + function, + mockedInputStream, + mockedFormData, + null, + null, + null, null); + } + + + private void registerDefaultFunction() throws IOException { + registerDefaultFunctionWithPackageUrl(null); + } + + private void registerDefaultFunctionWithPackageUrl(String packageUrl) throws IOException { + FunctionConfig functionConfig = createDefaultFunctionConfig(); + registerFunction(tenant, namespace, function, mockedInputStream, mockedFormData, packageUrl, functionConfig); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already" + + " exists") + public void testRegisterExistedFunction() throws IOException { + try { + Configurator.setRootLevel(Level.DEBUG); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") + public void testRegisterFunctionUploadFailure() throws IOException { + try { + mockWorkerUtils(ctx -> { + ctx.when(() -> { + WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class)); + } + ).thenThrow(new IOException("upload failure")); + }); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + registerDefaultFunction(); + 
} catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + @Test + public void testRegisterFunctionSuccess() throws IOException { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(timeOut = 20000) + public void testRegisterFunctionSuccessWithPackageName() throws IOException { + registerDefaultFunctionWithPackageUrl("function://public/default/test@v1"); + } + + @Test(timeOut = 20000) + public void testRegisterFunctionFailedWithWrongPackageName() throws PulsarAdminException, IOException { + try { + doThrow(new PulsarAdminException("package name is invalid")) + .when(mockedPackages).download(anyString(), anyString()); + registerDefaultFunctionWithPackageUrl("function://"); + } catch (RestException e) { + // expected exception + assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") + public void testRegisterFunctionNonExistingNamespace() throws IOException { + try { + this.namespaceList.clear(); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") + public void testRegisterFunctionNonexistantTenant() throws Exception { + try { + when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") + public void testRegisterFunctionFailure() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + doThrow(new IllegalArgumentException("function failed to register")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registration " + + "interrupted") + public void testRegisterFunctionInterrupted() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + doThrow(new IllegalStateException("Function registration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + // + // Update Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testUpdateFunctionMissingTenant() throws Exception { + try { + 
testUpdateFunctionMissingArguments( + null, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Tenant is not provided"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testUpdateFunctionMissingNamespace() throws Exception { + try { + testUpdateFunctionMissingArguments( + tenant, + null, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Namespace is not provided"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testUpdateFunctionMissingFunctionName() throws Exception { + try { + testUpdateFunctionMissingArguments( + tenant, + namespace, + null, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Function name is not provided"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingPackage() throws Exception { + try { + mockWorkerUtils(); + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingInputTopic() throws Exception { + try { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + null, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingClassName() throws Exception { + try { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testUpdateFunctionChangedParallelism() throws Exception { + try { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism + 1, + null); + } catch (RestException re) { + 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testUpdateFunctionChangedInputs() throws Exception { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + "DifferentOutput", + outputSerdeClassName, + null, + parallelism, + null); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") + public void testUpdateFunctionChangedOutput() throws Exception { + try { + mockWorkerUtils(); + + Map someOtherInput = new HashMap<>(); + someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + someOtherInput, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + "Input Topics cannot be altered"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + private void testUpdateFunctionMissingArguments( + String tenant, + String namespace, + String function, + InputStream inputStream, + Map topicsToSerDeClassName, + FormDataContentDisposition details, + String outputTopic, + String outputSerdeClassName, + String className, + Integer parallelism, + String expectedError) throws Exception { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + FunctionConfig functionConfig = new FunctionConfig(); + if (tenant != null) { + functionConfig.setTenant(tenant); + } + if (namespace != null) { + functionConfig.setNamespace(namespace); + } + if (function != null) { + functionConfig.setName(function); + } + if (topicsToSerDeClassName != null) { + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + } + if (outputTopic != null) { + functionConfig.setOutput(outputTopic); + } + if (outputSerdeClassName != null) { + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + } + if (className != null) { + functionConfig.setClassName(className); + } + if (parallelism != null) { + functionConfig.setParallelism(parallelism); + } + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + + if (expectedError != null) { + doThrow(new IllegalArgumentException(expectedError)) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + } + + updateFunction( + tenant, + namespace, + function, + inputStream, + details, + null, + functionConfig, + null, null); + + } + + private void updateDefaultFunction() throws IOException { + updateDefaultFunctionWithPackageUrl(null); + } + + private void updateDefaultFunctionWithPackageUrl(String packageUrl) throws IOException { + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + + updateFunction( + tenant, + namespace, + function, + mockedInputStream, + mockedFormData, + packageUrl, + functionConfig, + null, null); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " 
exist") + public void testUpdateNotExistedFunction() throws IOException { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") + public void testUpdateFunctionUploadFailure() throws Exception { + try { + mockWorkerUtils(ctx -> { + ctx.when(() -> { + WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class)); + + }).thenThrow(new IOException("upload failure")); + }); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + @Test + public void testUpdateFunctionSuccess() throws Exception { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateDefaultFunction(); + } + + @Test + public void testUpdateFunctionWithUrl() throws IOException { + Configurator.setRootLevel(Level.DEBUG); + + String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + String filePackageUrl = "file://" + fileLocation; + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateFunction( + tenant, + namespace, + function, + null, + null, + filePackageUrl, + functionConfig, + null, null); + + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") + public void testUpdateFunctionFailure() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalArgumentException("function failed to register")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registeration " + + "interrupted") + public void testUpdateFunctionInterrupted() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalStateException("Function registeration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + + @Test(timeOut = 20000) + public void 
testUpdateFunctionSuccessWithPackageName() throws IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + updateDefaultFunctionWithPackageUrl("function://public/default/test@v1"); + } + + @Test(timeOut = 20000) + public void testUpdateFunctionFailedWithWrongPackageName() throws PulsarAdminException, IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + try { + doThrow(new PulsarAdminException("package name is invalid")) + .when(mockedPackages).download(anyString(), anyString()); + registerDefaultFunctionWithPackageUrl("function://"); + } catch (RestException e) { + // expected exception + assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + } + } + + // + // deregister function + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testDeregisterFunctionMissingTenant() { + try { + + testDeregisterFunctionMissingArguments( + null, + namespace, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testDeregisterFunctionMissingNamespace() { + try { + testDeregisterFunctionMissingArguments( + tenant, + null, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testDeregisterFunctionMissingFunctionName() { + try { + testDeregisterFunctionMissingArguments( + tenant, + namespace, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " exist") + public void testDeregisterNotExistedFunction() { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); + throw re; + } + } + + @Test + public void testDeregisterFunctionSuccess() { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + deregisterDefaultFunction(); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") + public void testDeregisterFunctionFailure() throws Exception { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalArgumentException("function failed to deregister")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration " + + "interrupted") + public void testDeregisterFunctionInterrupted() throws Exception { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), 
eq(function))).thenReturn(true); + + doThrow(new IllegalStateException("Function deregisteration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + // + // Get Function Info + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testGetFunctionMissingTenant() throws IOException { + try { + testGetFunctionMissingArguments( + null, + namespace, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testGetFunctionMissingNamespace() throws IOException { + try { + testGetFunctionMissingArguments( + tenant, + null, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testGetFunctionMissingFunctionName() throws IOException { + try { + testGetFunctionMissingArguments( + tenant, + namespace, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + // + // List Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testListFunctionsMissingTenant() { + try { + testListFunctionsMissingArguments( + null, + namespace + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testListFunctionsMissingNamespace() { + try { + testListFunctionsMissingArguments( + tenant, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testDownloadFunctionHttpUrl() throws Exception { + String jarHttpUrl = + "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; + File pkgFile = downloadFunction(jarHttpUrl, null); + pkgFile.delete(); + } + + @Test + public void testDownloadFunctionFile() throws Exception { + File file = getPulsarApiExamplesNar(); + File pkgFile = downloadFunction(file.toURI().toString(), null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testDownloadFunctionBuiltinConnector() throws Exception { + File file = getPulsarApiExamplesNar(); + + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); + + registerBuiltinConnector("cassandra", file); + + File pkgFile = downloadFunction("builtin://cassandra", null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testDownloadFunctionBuiltinFunction() throws Exception { + File file = getPulsarApiExamplesNar(); + + 
WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); + + registerBuiltinFunction("exclamation", file); + + File pkgFile = downloadFunction("builtin://exclamation", null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { + Configurator.setRootLevel(Level.DEBUG); + + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig); + + } + + @Test + public void testRegisterFunctionWithConflictingFields() throws Exception { + Configurator.setRootLevel(Level.DEBUG); + String actualTenant = "DIFFERENT_TENANT"; + String actualNamespace = "DIFFERENT_NAMESPACE"; + String actualName = "DIFFERENT_NAME"; + this.namespaceList.add(actualTenant + "/" + actualNamespace); + + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, functionConfig); + } + + public static FunctionConfig createDefaultFunctionConfig() { + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + return functionConfig; + } + + public static FunctionDetails createDefaultFunctionDetails() { + FunctionConfig functionConfig = createDefaultFunctionConfig(); + return FunctionConfigUtils.convert(functionConfig); + } +} diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java new 
file mode 100644 index 0000000000000..51ca4c83f9e02 --- /dev/null +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java @@ -0,0 +1,337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.worker.rest.api.v3; + +import static org.apache.pulsar.functions.source.TopicSchema.DEFAULT_SERDE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.lang.reflect.Method; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Consumer; +import org.apache.distributedlog.api.namespace.Namespace; +import org.apache.pulsar.client.admin.Functions; +import org.apache.pulsar.client.admin.Namespaces; +import org.apache.pulsar.client.admin.Packages; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.Tenants; +import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.common.io.ConnectorDefinition; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.functions.instance.InstanceUtils; +import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.functions.runtime.RuntimeFactory; +import org.apache.pulsar.functions.source.TopicSchema; +import org.apache.pulsar.functions.utils.LoadedFunctionPackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; +import org.apache.pulsar.functions.utils.functions.FunctionArchive; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.Connector; +import org.apache.pulsar.functions.utils.io.ConnectorUtils; +import org.apache.pulsar.functions.worker.ConnectorsManager; +import org.apache.pulsar.functions.worker.FunctionMetaDataManager; +import org.apache.pulsar.functions.worker.FunctionRuntimeManager; +import org.apache.pulsar.functions.worker.FunctionsManager; +import org.apache.pulsar.functions.worker.LeaderService; +import org.apache.pulsar.functions.worker.PackageUrlValidator; +import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; +import 
org.apache.pulsar.functions.worker.WorkerUtils; +import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; +import org.glassfish.jersey.media.multipart.FormDataContentDisposition; +import org.mockito.Answers; +import org.mockito.MockSettings; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; + +public abstract class AbstractFunctionsResourceTest { + + protected static final String tenant = "test-tenant"; + protected static final String namespace = "test-namespace"; + protected static final Map topicsToSerDeClassName = new HashMap<>(); + protected static final String subscriptionName = "test-subscription"; + protected static final String CASSANDRA_STRING_SINK = "org.apache.pulsar.io.cassandra.CassandraStringSink"; + protected static final int parallelism = 1; + private static final String SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH = "pulsar-io-cassandra.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH = "pulsar-io-twitter.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH = "pulsar-io-invalid.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH = + "pulsar-functions-api-examples.nar.path"; + protected static Map mockStaticContexts = new HashMap<>(); + + static { + topicsToSerDeClassName.put("test_src", DEFAULT_SERDE); + topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); + } + + protected PulsarWorkerService mockedWorkerService; + protected PulsarAdmin mockedPulsarAdmin; + protected Tenants mockedTenants; + protected Namespaces mockedNamespaces; + protected Functions mockedFunctions; + protected TenantInfoImpl mockedTenantInfo; + protected List namespaceList = new LinkedList<>(); + protected FunctionMetaDataManager mockedManager; + protected FunctionRuntimeManager mockedFunctionRunTimeManager; + protected RuntimeFactory mockedRuntimeFactory; + protected Namespace mockedNamespace; + protected InputStream mockedInputStream; + protected FormDataContentDisposition mockedFormData; + protected Function.FunctionMetaData mockedFunctionMetaData; + protected LeaderService mockedLeaderService; + protected Packages mockedPackages; + protected PulsarFunctionTestTemporaryDirectory tempDirectory; + protected ConnectorsManager connectorsManager = new ConnectorsManager(); + protected FunctionsManager functionsManager = new FunctionsManager(); + + public static File getPulsarIOCassandraNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH) + , "pulsar-io-cassandra.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarIOTwitterNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH) + , "pulsar-io-twitter.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarIOInvalidNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH) + , "invalid nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarApiExamplesNar() { + return new File(Objects.requireNonNull( + 
System.getProperty(SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH) + , "pulsar-functions-api-examples.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH + " system property")); + } + + @BeforeMethod + public final void setup(Method method) throws Exception { + this.mockedManager = mock(FunctionMetaDataManager.class); + this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); + this.mockedRuntimeFactory = mock(RuntimeFactory.class); + this.mockedInputStream = mock(InputStream.class); + this.mockedNamespace = mock(Namespace.class); + this.mockedFormData = mock(FormDataContentDisposition.class); + when(mockedFormData.getFileName()).thenReturn("test"); + this.mockedTenantInfo = mock(TenantInfoImpl.class); + this.mockedPulsarAdmin = mock(PulsarAdmin.class); + this.mockedTenants = mock(Tenants.class); + this.mockedNamespaces = mock(Namespaces.class); + this.mockedFunctions = mock(Functions.class); + this.mockedLeaderService = mock(LeaderService.class); + this.mockedPackages = mock(Packages.class); + namespaceList.add(tenant + "/" + namespace); + + this.mockedWorkerService = mock(PulsarWorkerService.class); + when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); + when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); + when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); + when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); + when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); + when(mockedWorkerService.isInitialized()).thenReturn(true); + when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); + when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); + when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); + when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); + when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); + when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); + when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); + when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); + when(mockedLeaderService.isLeader()).thenReturn(true); + doAnswer(invocationOnMock -> { + Files.copy(getDefaultNarFile().toPath(), Paths.get(invocationOnMock.getArgument(1, String.class)), + StandardCopyOption.REPLACE_EXISTING); + return null; + }).when(mockedPackages).download(any(), any()); + + // worker config + List urlPatterns = + List.of("http://localhost.*", "file:.*", "https://repo1.maven.org/maven2/org/apache/pulsar/.*"); + WorkerConfig workerConfig = new WorkerConfig() + .setWorkerId("test") + .setWorkerPort(8080) + .setFunctionMetadataTopicName("pulsar/functions") + .setNumFunctionPackageReplicas(3) + .setPulsarServiceUrl("pulsar://localhost:6650/") + .setAdditionalEnabledFunctionsUrlPatterns(urlPatterns) + .setAdditionalEnabledConnectorUrlPatterns(urlPatterns) + .setFunctionsWorkerEnablePackageManagement(true); + customizeWorkerConfig(workerConfig, method); + tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); + tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); + when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); + PackageUrlValidator 
packageUrlValidator = new PackageUrlValidator(workerConfig); + when(mockedWorkerService.getPackageUrlValidator()).thenReturn(packageUrlValidator); + + doSetup(); + } + + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + + } + + protected File getDefaultNarFile() { + return getPulsarIOTwitterNar(); + } + + protected void doSetup() throws Exception { + + } + + @AfterMethod(alwaysRun = true) + public void cleanup() { + if (tempDirectory != null) { + tempDirectory.delete(); + } + mockStaticContexts.values().forEach(MockedStatic::close); + mockStaticContexts.clear(); + } + + protected void mockStatic(Class classStatic, Consumer> consumer) { + mockStatic(classStatic, withSettings().defaultAnswer(Mockito.CALLS_REAL_METHODS), consumer); + } + + private void mockStatic(Class classStatic, MockSettings mockSettings, Consumer> consumer) { + final MockedStatic mockedStatic = mockStaticContexts.computeIfAbsent(classStatic.getName(), + name -> Mockito.mockStatic(classStatic, mockSettings)); + consumer.accept(mockedStatic); + } + + protected void mockWorkerUtils() { + mockWorkerUtils(null); + } + + protected void mockWorkerUtils(Consumer> consumer) { + mockStatic(WorkerUtils.class, withSettings(), ctx -> { + // make dumpToTmpFile return the nar file copy + ctx.when(() -> WorkerUtils.dumpToTmpFile(mockedInputStream)) + .thenAnswer(invocation -> { + Path tempFile = Files.createTempFile("test", ".nar"); + Files.copy(getPulsarApiExamplesNar().toPath(), tempFile, + StandardCopyOption.REPLACE_EXISTING); + return tempFile.toFile(); + }); + ctx.when(() -> WorkerUtils.dumpToTmpFile(any())) + .thenAnswer(Answers.CALLS_REAL_METHODS); + if (consumer != null) { + consumer.accept(ctx); + } + }); + } + + protected void mockInstanceUtils() { + mockStatic(InstanceUtils.class, ctx -> { + ctx.when(() -> InstanceUtils.calculateSubjectType(any())) + .thenReturn(getComponentType()); + }); + } + + protected abstract Function.FunctionDetails.ComponentType getComponentType(); + + public static class LoadedConnector extends Connector { + public LoadedConnector(ConnectorDefinition connectorDefinition) { + super(null, connectorDefinition, null, true); + } + + @Override + public ValidatableFunctionPackage getConnectorFunctionPackage() { + return new LoadedFunctionPackage(getClass().getClassLoader(), ConnectorDefinition.class, + getConnectorDefinition()); + } + } + + + protected void registerBuiltinConnector(String connectorType, String className) { + ConnectorDefinition connectorDefinition = null; + if (className != null) { + connectorDefinition = new ConnectorDefinition(); + // set source and sink class to the same to simplify the test + connectorDefinition.setSinkClass(className); + connectorDefinition.setSourceClass(className); + } + connectorsManager.addConnector(connectorType, new LoadedConnector(connectorDefinition)); + } + + protected void registerBuiltinConnector(String connectorType, File packageFile) { + ConnectorDefinition cntDef; + try { + cntDef = ConnectorUtils.getConnectorDefinition(packageFile); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + connectorsManager.addConnector(connectorType, + new Connector(packageFile.toPath(), cntDef, NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR, true)); + } + + public static class LoadedFunctionArchive extends FunctionArchive { + public LoadedFunctionArchive(FunctionDefinition functionDefinition) { + super(null, functionDefinition, null, true); + } + + @Override + public ValidatableFunctionPackage getFunctionPackage() { + return new 
LoadedFunctionPackage(getClass().getClassLoader(), FunctionDefinition.class, + getFunctionDefinition()); + } + } + + protected void registerBuiltinFunction(String functionType, String className) { + FunctionDefinition functionDefinition = null; + if (className != null) { + functionDefinition = new FunctionDefinition(); + functionDefinition.setFunctionClass(className); + } + functionsManager.addFunction(functionType, new LoadedFunctionArchive(functionDefinition)); + } + + protected void registerBuiltinFunction(String functionType, File packageFile) { + FunctionDefinition cntDef; + try { + cntDef = FunctionUtils.getFunctionDefinition(packageFile); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + functionsManager.addFunction(functionType, + new FunctionArchive(packageFile.toPath(), cntDef, NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR, true)); + } +} diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java index 337999bf2ad5e..a1a418460be45 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java @@ -18,552 +18,49 @@ */ package org.apache.pulsar.functions.worker.rest.api.v3; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import com.google.common.collect.Lists; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.net.URL; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.UUID; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; -import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; import org.apache.pulsar.common.functions.FunctionConfig; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.Context; -import 
org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.instance.InstanceUtils; -import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.proto.Function.PackageLocationMetaData; -import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; -import org.apache.pulsar.functions.proto.Function.SinkSpec; -import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.proto.Function.SubscriptionType; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.source.TopicSchema; -import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.utils.FunctionConfigUtils; -import org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.io.Connector; -import org.apache.pulsar.functions.worker.ConnectorsManager; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.FunctionsManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.rest.api.FunctionsImpl; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; -import org.apache.pulsar.functions.worker.rest.api.v2.FunctionsApiV2Resource; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; -import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -/** - * Unit test of {@link FunctionsApiV2Resource}. 
- */ -public class FunctionApiV3ResourceTest { - - private static final class TestFunction implements Function { - - @Override - public String process(String input, Context context) { - return input; - } - } - - private static final class WrongFunction implements Consumer { - @Override - public void accept(String s) { - - } - } - - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; - private static final String function = "test-function"; - private static final String outputTopic = "test-output-topic"; - private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; - private static final String className = TestFunction.class.getName(); - private SubscriptionType subscriptionType = SubscriptionType.FAILOVER; - private static final Map topicsToSerDeClassName = new HashMap<>(); - static { - topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); - } - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; +public class FunctionApiV3ResourceTest extends AbstractFunctionApiResourceTest { private FunctionsImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetadata; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - private static final String SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH = - "pulsar-functions-api-examples.nar.path"; - - public static File getPulsarApiExamplesNar() { - return new File(Objects.requireNonNull( - System.getProperty(SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH) - , "pulsar-functions-api-examples.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH + " system property")); - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedPackages = mock(Packages.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedFunctionMetadata = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - 
when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); - doNothing().when(mockedPackages).download(anyString(), anyString()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); - + @Override + protected void doSetup() { + super.doSetup(); this.resource = spy(new FunctionsImpl(() -> mockedWorkerService)); } - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); - } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); - } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - private void mockWorkerUtils() { - mockWorkerUtils(null); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); - } - - private void mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.FUNCTION); - }); - } - - // - // Register Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testRegisterFunctionMissingTenant() { - try { - testRegisterFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void 
testRegisterFunctionMissingNamespace() { - try { - testRegisterFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testRegisterFunctionMissingFunctionName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not provided") - public void testRegisterFunctionMissingPackage() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) specified for the function") - public void testRegisterFunctionMissingInputTopics() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function Package is not provided") - public void testRegisterFunctionMissingPackageDetails() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package does not have" - + " the correct format. Pulsar cannot determine if the package is a NAR package or JAR package. 
Function " - + "classname is not provided and attempts to load it as a NAR package produced the following error.*") - public void testRegisterFunctionMissingClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass must be in class path") - public void testRegisterFunctionWrongClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - "UnknownClass", - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a positive number") - public void testRegisterFunctionWrongParallelism() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - -2, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, - expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used as an input topic \\(topics must be one or the other\\)") - public void testRegisterFunctionSameInputOutput() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - topicsToSerDeClassName.keySet().iterator().next(), - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + "-output-topic/test:" + " is invalid") - public void testRegisterFunctionWrongOutputTopic() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - function + "-output-topic/test:", - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. when getting Function package from .*") - public void testRegisterFunctionHttpUrl() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "http://localhost:1234/test"); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class .*. 
does not implement the correct interface") - public void testRegisterFunctionImplementWrongInterface() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - WrongFunction.class.getName(), - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testRegisterFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String functionPkgUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - + protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + FormDataContentDisposition details, String functionPkgUrl, FunctionConfig functionConfig) { resource.registerFunction( tenant, namespace, @@ -573,1143 +70,116 @@ private void testRegisterFunctionMissingArguments( functionPkgUrl, functionConfig, null); - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") - public void testMissingFunctionConfig() { - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - null, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") - public void testUpdateMissingFunctionConfig() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - null, - null, null); - } - - private void registerDefaultFunction() { - registerDefaultFunctionWithPackageUrl(null); - } - - private void registerDefaultFunctionWithPackageUrl(String packageUrl) { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - packageUrl, - functionConfig, - null); } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already exists") - public void testRegisterExistedFunction() { - try { - Configurator.setRootLevel(Level.DEBUG); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - @Test(expectedExceptions = 
RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testRegisterFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> { - WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class)); - } - ).thenThrow(new IOException("upload failure")); - ; - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testRegisterFunctionSuccess() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(timeOut = 20000) - public void testRegisterFunctionSuccessWithPackageName() { - registerDefaultFunctionWithPackageUrl("function://public/default/test@v1"); - } - - @Test(timeOut = 20000) - public void testRegisterFunctionFailedWithWrongPackageName() throws PulsarAdminException { - try { - doThrow(new PulsarAdminException("package name is invalid")) - .when(mockedPackages).download(anyString(), anyString()); - registerDefaultFunctionWithPackageUrl("function://"); - } catch (RestException e) { - // expected exception - assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - } + protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) { + resource.updateFunction(tenant, namespace, functionName, uploadedInputStream, fileDetail, functionPkgUrl, + functionConfig, authParams, updateOptions); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") - public void testRegisterFunctionNonExistingNamespace() { - try { - this.namespaceList.clear(); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") - public void testRegisterFunctionNonexistantTenant() throws Exception { - try { - when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testRegisterFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = 
RestException.class, expectedExceptionsMessageRegExp = "Function registration interrupted") - public void testRegisterFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalStateException("Function registration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } + protected StreamingOutput downloadFunction(String tenant, String namespace, String componentName, + AuthenticationParameters authParams, boolean transformFunction) { + return resource.downloadFunction(tenant, namespace, componentName, authParams, transformFunction); } - - /* - Externally managed runtime, - uploadBuiltinSinksSources == false - Make sure uploadFileToBookkeeper is not called - */ - @Test - public void testRegisterFunctionSuccessK8sNoUpload() throws Exception { - mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(false); - - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new RuntimeException("uploadFileToBookkeeper triggered")); - - }); - - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getFunctionTypes(any(FunctionConfig.class), any(Class.class))).thenReturn(new Class[]{String.class, String.class}); - ctx.when(() -> FunctionCommon.convertRuntime(any(FunctionConfig.Runtime.class))).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - - }); - - doReturn(Function.class).when(mockedClassLoader).loadClass(anyString()); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedFunctionsManager.getFunctionArchive(any())).thenReturn(getPulsarApiExamplesNar().toPath()); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = createDefaultFunctionConfig(); - functionConfig.setJar("builtin://exclamation"); - - try (FileInputStream inputStream = new FileInputStream(getPulsarApiExamplesNar())) { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - mockedFormData, - null, - functionConfig, - null); - } - } - - /* - Externally managed runtime, - uploadBuiltinSinksSources == true - Make sure uploadFileToBookkeeper is called - */ - @Test - public void testRegisterFunctionSuccessK8sWithUpload() throws Exception { - final String injectedErrMsg = "uploadFileToBookkeeper triggered"; - mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(true); - - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new RuntimeException(injectedErrMsg)); - - }); - - NarClassLoader mockedClassLoader = 
mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getFunctionTypes(any(FunctionConfig.class), any(Class.class))).thenReturn(new Class[]{String.class, String.class}); - ctx.when(() -> FunctionCommon.convertRuntime(any(FunctionConfig.Runtime.class))).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - }); - - doReturn(Function.class).when(mockedClassLoader).loadClass(anyString()); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedFunctionsManager.getFunctionArchive(any())).thenReturn(getPulsarApiExamplesNar().toPath()); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = createDefaultFunctionConfig(); - functionConfig.setJar("builtin://exclamation"); - - try (FileInputStream inputStream = new FileInputStream(getPulsarApiExamplesNar())) { - try { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - mockedFormData, - null, - functionConfig, - null); - Assert.fail(); - } catch (RuntimeException e) { - Assert.assertEquals(e.getMessage(), injectedErrMsg); - } - } - } - - // - // Update Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testUpdateFunctionMissingTenant() throws Exception { - try { - testUpdateFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Tenant is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testUpdateFunctionMissingNamespace() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Namespace is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testUpdateFunctionMissingFunctionName() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Function name is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingPackage() throws Exception { - try { - mockWorkerUtils(); - testUpdateFunctionMissingArguments( - tenant, - 
namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingInputTopic() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingClassName() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedParallelism() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism + 1, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedInputs() throws Exception { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - "DifferentOutput", - outputSerdeClassName, - null, - parallelism, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") - public void testUpdateFunctionChangedOutput() throws Exception { - try { - mockWorkerUtils(); - - Map someOtherInput = new HashMap<>(); - someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - someOtherInput, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Input Topics cannot be altered"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testUpdateFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String expectedError) throws Exception { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if 
(topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - if (expectedError != null) { - doThrow(new IllegalArgumentException(expectedError)) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - } - - resource.updateFunction( - tenant, - namespace, - function, - inputStream, - details, - null, - functionConfig, - null, null); - - } - - private void updateDefaultFunction() { - updateDefaultFunctionWithPackageUrl(null); - } - - private void updateDefaultFunctionWithPackageUrl(String packageUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - packageUrl, - functionConfig, - null, null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testUpdateNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testUpdateFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> { - WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class)); - - }).thenThrow(new IOException("upload failure")); - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testUpdateFunctionSuccess() throws Exception { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } - - @Test - public void testUpdateFunctionWithUrl() { - Configurator.setRootLevel(Level.DEBUG); - - String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - String filePackageUrl = "file://" + fileLocation; - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - 
functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - resource.updateFunction( - tenant, - namespace, - function, - null, - null, - filePackageUrl, - functionConfig, - null, null); - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testUpdateFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registeration interrupted") - public void testUpdateFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function registeration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - - @Test(timeOut = 20000) - public void testUpdateFunctionSuccessWithPackageName() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - updateDefaultFunctionWithPackageUrl("function://public/default/test@v1"); - } - - @Test(timeOut = 20000) - public void testUpdateFunctionFailedWithWrongPackageName() throws PulsarAdminException { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - try { - doThrow(new PulsarAdminException("package name is invalid")) - .when(mockedPackages).download(anyString(), anyString()); - registerDefaultFunctionWithPackageUrl("function://"); - } catch (RestException e) { - // expected exception - assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - } - } - - // - // deregister function - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testDeregisterFunctionMissingTenant() { - try { - - testDeregisterFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testDeregisterFunctionMissingNamespace() { - try { - testDeregisterFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testDeregisterFunctionMissingFunctionName() { - try { - 
testDeregisterFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testDeregisterFunctionMissingArguments( - String tenant, - String namespace, - String function - ) { - resource.deregisterFunction( - tenant, - namespace, - function, - null); - } - - private void deregisterDefaultFunction() { - resource.deregisterFunction( - tenant, - namespace, - function, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testDeregisterNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testDeregisterFunctionSuccess() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - deregisterDefaultFunction(); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") - public void testDeregisterFunctionFailure() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to deregister")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration interrupted") - public void testDeregisterFunctionInterrupted() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function deregisteration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Get Function Info - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testGetFunctionMissingTenant() { - try { - testGetFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; + + protected File downloadFunction(final String path, final AuthenticationParameters authParams) throws IOException { + StreamingOutput streamingOutput = resource.downloadFunction(path, authParams); + File pkgFile = File.createTempFile("testpkg", "nar"); + try(OutputStream output = new FileOutputStream(pkgFile)) { + streamingOutput.write(output); } + return pkgFile; } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testGetFunctionMissingNamespace() { - try { - testGetFunctionMissingArguments( + protected void testDeregisterFunctionMissingArguments( + String tenant, + String namespace, + String function + ) { + 
resource.deregisterFunction( tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + namespace, + function, + null); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testGetFunctionMissingFunctionName() { - try { - testGetFunctionMissingArguments( + protected void deregisterDefaultFunction() { + resource.deregisterFunction( tenant, namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + function, + null); } - private void testGetFunctionMissingArguments( + protected void testGetFunctionMissingArguments( String tenant, String namespace, String function ) { resource.getFunctionInfo( - tenant, - namespace, - function,null + tenant, + namespace, + function, null ); } - private FunctionConfig getDefaultFunctionInfo() { + protected FunctionConfig getDefaultFunctionInfo() { return resource.getFunctionInfo( - tenant, - namespace, - function, - null - ); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testGetNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - getDefaultFunctionInfo(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testGetFunctionSuccess() { - mockInstanceUtils(); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - SinkSpec sinkSpec = SinkSpec.newBuilder() - .setTopic(outputTopic) - .setSerDeClassName(outputSerdeClassName).build(); - FunctionDetails functionDetails = FunctionDetails.newBuilder() - .setClassName(className) - .setSink(sinkSpec) - .setAutoAck(true) - .setName(function) - .setNamespace(namespace) - .setProcessingGuarantees(ProcessingGuarantees.ATMOST_ONCE) - .setTenant(tenant) - .setParallelism(parallelism) - .setSource(SourceSpec.newBuilder().setSubscriptionType(subscriptionType) - .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setCreateTime(System.currentTimeMillis()) - .setFunctionDetails(functionDetails) - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) - .setVersion(1234) - .build(); - when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - - FunctionConfig functionConfig = getDefaultFunctionInfo(); - assertEquals( - FunctionConfigUtils.convertFromDetails(functionDetails), - functionConfig); - } - - // - // List Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testListFunctionsMissingTenant() { - try { - testListFunctionsMissingArguments( - null, - namespace - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testListFunctionsMissingNamespace() { - try { - testListFunctionsMissingArguments( tenant, + namespace, + function, null - ); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + ); } - private void testListFunctionsMissingArguments( + protected void testListFunctionsMissingArguments( String tenant, String namespace ) { resource.listFunctions( - tenant, - namespace,null + tenant, + namespace, null ); } - private List listDefaultFunctions() { + protected List listDefaultFunctions() { return resource.listFunctions( - tenant, - namespace,null + tenant, + namespace, null ); } - @Test - public void testListFunctionsSuccess() { - mockInstanceUtils(); - final List functions = Lists.newArrayList("test-1", "test-2"); - final List metaDataList = new LinkedList<>(); - FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-1").build() - ).build(); - FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-2").build() - ).build(); - metaDataList.add(functionMetaData1); - metaDataList.add(functionMetaData2); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); - } - - @Test - public void testOnlyGetSources() { - List functions = Lists.newArrayList("test-2"); - List functionMetaDataList = new LinkedList<>(); - FunctionMetaData f1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-1") - .setComponentType(FunctionDetails.ComponentType.SOURCE) - .build()).build(); - functionMetaDataList.add(f1); - FunctionMetaData f2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-2") - .setComponentType(FunctionDetails.ComponentType.FUNCTION) - .build()).build(); - functionMetaDataList.add(f2); - FunctionMetaData f3 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-3") - .setComponentType(FunctionDetails.ComponentType.SINK) - .build()).build(); - functionMetaDataList.add(f3); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(functionMetaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); - } - - @Test - public void testDownloadFunctionHttpUrl() throws Exception { - String jarHttpUrl = - "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - StreamingOutput streamOutput = resource.downloadFunction(jarHttpUrl, null); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - pkgFile.delete(); - } - - @Test - public void testDownloadFunctionFile() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - StreamingOutput streamOutput = resource.downloadFunction("file:///" + fileLocation, null); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - 
Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); - } - - @Test - public void testDownloadFunctionBuiltinConnector() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); - when(mockedWorkerService.getWorkerConfig()).thenReturn(config); - - Connector connector = Connector.builder().archivePath(file.toPath()).build(); - ConnectorsManager connectorsManager = mock(ConnectorsManager.class); - when(connectorsManager.getConnector("cassandra")).thenReturn(connector); - when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); - - StreamingOutput streamOutput = resource.downloadFunction("builtin://cassandra", null); - - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - output.flush(); - output.close(); - Assert.assertTrue(pkgFile.exists()); - Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); - } - - @Test - public void testDownloadFunctionBuiltinFunction() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); - when(mockedWorkerService.getWorkerConfig()).thenReturn(config); - - FunctionsManager functionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); - - StreamingOutput streamOutput = resource.downloadFunction("builtin://exclamation", null); - - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - output.flush(); - output.close(); - Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); - } - @Test public void testDownloadFunctionBuiltinConnectorByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://cassandra")) - 
.setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.SINK)) + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("builtin://cassandra")) + .setTransformFunctionPackageLocation( + Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails(Function.FunctionDetails.newBuilder().setComponentType(Function.FunctionDetails.ComponentType.SINK)) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - Connector connector = Connector.builder().archivePath(file.toPath()).build(); - ConnectorsManager connectorsManager = mock(ConnectorsManager.class); - when(connectorsManager.getConnector("cassandra")).thenReturn(connector); - when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); + registerBuiltinConnector("cassandra", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), false); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1719,31 +189,27 @@ public void testDownloadFunctionBuiltinConnectorByName() throws Exception { @Test public void testDownloadFunctionBuiltinFunctionByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://exclamation")) - .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.FUNCTION)) - .build(); + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("builtin://exclamation")) + .setTransformFunctionPackageLocation( + Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails( + Function.FunctionDetails.newBuilder().setComponentType(Function.FunctionDetails.ComponentType.FUNCTION)) + .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionsManager functionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - 
when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + registerBuiltinFunction("exclamation", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), false); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1753,32 +219,26 @@ public void testDownloadFunctionBuiltinFunctionByName() throws Exception { @Test public void testDownloadTransformFunctionByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig workerConfig = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder() + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setTransformFunctionPackageLocation(Function.PackageLocationMetaData.newBuilder() .setPackagePath("builtin://exclamation")) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionsManager functionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + registerBuiltinFunction("exclamation", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), true); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1786,15 +246,58 @@ public void testDownloadTransformFunctionByName() throws Exception { pkgFile.delete(); } + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " exist") + public void testGetNotExistedFunction() throws IOException { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + getDefaultFunctionInfo(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), 
Response.Status.NOT_FOUND); + throw re; + } + } @Test - public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { + public void testGetFunctionSuccess() throws IOException { + mockInstanceUtils(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + Function.SinkSpec sinkSpec = Function.SinkSpec.newBuilder() + .setTopic(outputTopic) + .setSerDeClassName(outputSerdeClassName).build(); + Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder() + .setClassName(className) + .setSink(sinkSpec) + .setAutoAck(true) + .setName(function) + .setNamespace(namespace) + .setProcessingGuarantees(Function.ProcessingGuarantees.ATMOST_ONCE) + .setTenant(tenant) + .setParallelism(parallelism) + .setSource(Function.SourceSpec.newBuilder().setSubscriptionType(subscriptionType) + .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setCreateTime(System.currentTimeMillis()) + .setFunctionDetails(functionDetails) + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) + .setVersion(1234) + .build(); + when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); + + FunctionConfig functionConfig = getDefaultFunctionInfo(); + assertEquals( + FunctionConfigUtils.convertFromDetails(functionDetails), + functionConfig); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function language runtime is " + + "either not set or cannot be determined") + public void testCreateFunctionWithoutSettingRuntime() throws Exception { Configurator.setRootLevel(Level.DEBUG); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); FunctionConfig functionConfig = new FunctionConfig(); @@ -1803,82 +306,135 @@ public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { functionConfig.setName(function); functionConfig.setClassName(className); functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); functionConfig.setOutput(outputTopic); functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig, null); + registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig); } @Test - public void testRegisterFunctionWithConflictingFields() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - String actualTenant = "DIFFERENT_TENANT"; - String actualNamespace = "DIFFERENT_NAMESPACE"; - String actualName = "DIFFERENT_NAME"; - this.namespaceList.add(actualTenant + "/" + actualNamespace); + public void testUpdateSourceWithNoChange() throws IOException { + mockWorkerUtils(); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = 
"file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); + FunctionConfig funcConfig = createDefaultFunctionConfig(); - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, functionConfig, - null); + // config has not changes and don't update auth, should fail + try { + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + null); + fail("Update without changes should fail"); + } catch (RestException e) { + assertThat(e.getMessage()).contains("Update contains no change"); + } + + try { + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(false); + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + updateOptions); + fail("Update without changes should fail"); + } catch (RestException e) { + assertTrue(e.getMessage().contains("Update contains no change")); + } + + // no changes but set the auth-update flag to true, should not fail + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(true); + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + updateOptions); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function language runtime is either not set or cannot be determined") - public void testCreateFunctionWithoutSettingRuntime() throws Exception { - Configurator.setRootLevel(Level.DEBUG); + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") + public void testMissingFunctionConfig() throws IOException { + registerFunction(tenant, namespace, function, mockedInputStream, mockedFormData, null, null); + } + + /* + Externally managed runtime, + uploadBuiltinSinksSources == false + Make sure uploadFileToBookkeeper is not called + */ + @Test + public void testRegisterFunctionSuccessK8sNoUpload() throws Exception { + mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(false); + + mockStatic(WorkerUtils.class, ctx -> { + ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class))) + .thenThrow(new RuntimeException("uploadFileToBookkeeper triggered")); + + }); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; + registerBuiltinFunction("exclamation", getPulsarApiExamplesNar()); + when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); 
when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig, null); + FunctionConfig functionConfig = createDefaultFunctionConfig(); + functionConfig.setJar("builtin://exclamation"); + registerFunction(tenant, namespace, function, null, mockedFormData, null, functionConfig); } - public static FunctionConfig createDefaultFunctionConfig() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - return functionConfig; - } + /* + Externally managed runtime, + uploadBuiltinSinksSources == true + Make sure uploadFileToBookkeeper is called + */ + @Test + public void testRegisterFunctionSuccessK8sWithUpload() throws Exception { + final String injectedErrMsg = "uploadFileToBookkeeper triggered"; + mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(true); + + mockStatic(WorkerUtils.class, ctx -> { + ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class))) + .thenThrow(new RuntimeException(injectedErrMsg)); + + }); + + registerBuiltinFunction("exclamation", getPulsarApiExamplesNar()); + when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - public static FunctionDetails createDefaultFunctionDetails() { FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + functionConfig.setJar("builtin://exclamation"); + + try { + registerFunction(tenant, namespace, function, null, mockedFormData, null, functionConfig); + Assert.fail(); + } catch (RuntimeException e) { + Assert.assertEquals(e.getMessage(), injectedErrMsg); + } } + } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java index 9f786c5b73aac..c6c6303e48007 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java @@ -20,241 +20,84 @@ import static org.apache.pulsar.functions.proto.Function.ProcessingGuarantees.ATLEAST_ONCE; import static org.apache.pulsar.functions.source.TopicSchema.DEFAULT_SERDE; -import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; -import static 
org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import com.google.common.collect.Lists; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; +import java.lang.reflect.Method; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; -import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.functions.Utils; import org.apache.pulsar.common.io.SinkConfig; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.examples.ExclamationFunction; import org.apache.pulsar.functions.api.examples.RecordFunction; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.SinkConfigUtils; -import org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.Connector; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; -import org.apache.pulsar.functions.worker.ConnectorsManager; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.FunctionsManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; import org.apache.pulsar.functions.worker.rest.api.SinksImpl; import 
org.glassfish.jersey.media.multipart.FormDataContentDisposition; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; /** * Unit test of {@link SinksApiV3Resource}. */ -public class SinkApiV3ResourceTest { +public class SinkApiV3ResourceTest extends AbstractFunctionsResourceTest { - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; private static final String sink = "test-sink"; - private static final Map topicsToSerDeClassName = new HashMap<>(); - - static { - topicsToSerDeClassName.put("test_src", DEFAULT_SERDE); - } - - private static final String subscriptionName = "test-subscription"; - private static final String CASSANDRA_STRING_SINK = "org.apache.pulsar.io.cassandra.CassandraStringSink"; - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; + private SinksImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetaData; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - private static final String SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH = "pulsar-io-cassandra.nar.path"; - - public static File getPulsarIOCassandraNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH) - , "pulsar-io-cassandra.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH + " system property")); - } - - private static final String SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH = "pulsar-io-twitter.nar.path"; - - public static File getPulsarIOTwitterNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH) - , "pulsar-io-twitter.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH + " system property")); - } - - private static final String SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH = "pulsar-io-invalid.nar.path"; - - public static File getPulsarIOInvalidNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH) - , "invalid nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH + " system property")); - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedTenantInfo = 
mock(TenantInfoImpl.class); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedPackages = mock(Packages.class); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - doAnswer(invocationOnMock -> { - Files.copy(getPulsarIOCassandraNar().toPath(), Paths.get(invocationOnMock.getArgument(1, String.class)), - StandardCopyOption.REPLACE_EXISTING); - return null; - }).when(mockedPackages).download(any(), any()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + @Override + protected void doSetup() { this.resource = spy(new SinksImpl(() -> mockedWorkerService)); - } - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().contains("Upload") || method.getName().contains("BKPackage")) { + workerConfig.setFunctionsWorkerEnablePackageManagement(false); } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); - } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); } - - private void mockWorkerUtils() { - mockWorkerUtils(null); + @Override + protected Function.FunctionDetails.ComponentType getComponentType() { + return Function.FunctionDetails.ComponentType.SINK; } - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); - } - - private void 
mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.SINK); - }); + @Override + protected File getDefaultNarFile() { + return getPulsarIOCassandraNar(); } - - // - // Register Functions - // - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") public void testRegisterSinkMissingTenant() { try { @@ -334,8 +177,8 @@ public void testRegisterSinkMissingPackage() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink class UnknownClass must " - + "be in class path") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink class UnknownClass not " + + "found") public void testRegisterSinkWrongClassName() { mockInstanceUtils(); try { @@ -356,10 +199,8 @@ public void testRegisterSinkWrongClassName() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink package does not have the" - + " correct format. Pulsar cannot determine if the package is a NAR package" - + " or JAR package. Sink classname is not provided and attempts to load it as a NAR package produced the " - + "following error.") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink package doesn't contain " + + "the META-INF/services/pulsar-io.yaml file.") public void testRegisterSinkMissingPackageDetails() { mockInstanceUtils(); try { @@ -719,30 +560,11 @@ public void testRegisterSinkSuccessWithTransformFunction() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(RecordFunction.class).when(mockedClassLoader).loadClass("RecordFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - mockStatic(FunctionUtils.class, ctx -> { - ctx.when(() -> FunctionUtils.getFunctionClass(any())).thenReturn("RecordFunction"); - }); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); + registerBuiltinConnector("recordfunction", RecordFunction.class.getName()); + registerBuiltinFunction("transform", RecordFunction.class.getName()); SinkConfig sinkConfig = createDefaultSinkConfig(); + sinkConfig.setSinkType("builtin://recordfunction"); sinkConfig.setTransformFunction("builtin://transform"); sinkConfig.setTransformFunctionConfig("{\"dummy\": \"dummy\"}"); @@ -767,28 +589,7 @@ public void testRegisterSinkFailureWithInvalidTransformFunction() throws Excepti when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); 
- NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(ExclamationFunction.class).when(mockedClassLoader).loadClass("ExclamationFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - mockStatic(FunctionUtils.class, ctx -> { - ctx.when(() -> FunctionUtils.getFunctionClass(any())).thenReturn("ExclamationFunction"); - }); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); + registerBuiltinFunction("transform", getPulsarApiExamplesNar()); SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setTransformFunction("builtin://transform"); @@ -943,16 +744,18 @@ public void testUpdateSinkDifferentInputs() throws Exception { public void testUpdateSinkDifferentParallelism() throws Exception { mockWorkerUtils(); - testUpdateSinkMissingArguments( - tenant, - namespace, - sink, - null, - mockedFormData, - topicsToSerDeClassName, - CASSANDRA_STRING_SINK, - parallelism + 1, - null); + try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { + testUpdateSinkMissingArguments( + tenant, + namespace, + sink, + inputStream, + mockedFormData, + topicsToSerDeClassName, + CASSANDRA_STRING_SINK, + parallelism + 1, + null); + } } private void testUpdateSinkMissingArguments( @@ -965,29 +768,7 @@ private void testUpdateSinkMissingArguments( String className, Integer parallelism, String expectedError) throws Exception { - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any(NarClassLoader.class))) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - - this.mockedFunctionMetaData = - FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); + mockFunctionCommon(tenant, namespace, sink); SinkConfig sinkConfig = new SinkConfig(); if (tenant != null) { @@ -1033,25 +814,6 @@ private void updateDefaultSink() throws 
Exception { private void updateDefaultSinkWithPackageUrl(String packageUrl) throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any(NarClassLoader.class))) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - - this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); @@ -1084,7 +846,6 @@ public void testUpdateNotExistedSink() throws Exception { public void testUpdateSinkUploadFailure() throws Exception { try { mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( anyString(), any(File.class), @@ -1120,24 +881,6 @@ public void testUpdateSinkWithUrl() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any())) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.extractFileFromPkgURL(any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); @@ -1212,35 +955,17 @@ public void testUpdateSinkDifferentTransformFunction() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setTransformFunction("builtin://transform"); - sinkConfig.setTransformFunctionClassName("DummyFunction"); sinkConfig.setTransformFunctionConfig("{\"dummy\": \"dummy\"}"); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(RecordFunction.class).when(mockedClassLoader).loadClass("DummyFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> 
FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); + registerBuiltinFunction("transform", RecordFunction.class.getName()); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { resource.updateSink( tenant, @@ -1730,6 +1455,14 @@ private SinkConfig createDefaultSinkConfig() { return sinkConfig; } + private void mockFunctionCommon(String tenant, String namespace, String sink) throws IOException { + this.mockedFunctionMetaData = + Function.FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); + } + private FunctionDetails createDefaultFunctionDetails() throws IOException { return SinkConfigUtils.convert(createDefaultSinkConfig(), new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); @@ -1753,21 +1486,7 @@ public void testRegisterSinkSuccessK8sNoUpload() throws Exception { }); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - ConnectorsManager mockedConnManager = mock(ConnectorsManager.class); - Connector connector = Connector.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedConnManager.getConnector("cassandra")).thenReturn(connector); - when(mockedConnManager.getSinkArchive(any())).thenReturn(getPulsarIOCassandraNar().toPath()); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mockedConnManager); + registerBuiltinConnector("cassandra", getPulsarIOCassandraNar()); when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); @@ -1775,17 +1494,15 @@ public void testRegisterSinkSuccessK8sNoUpload() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setArchive("builtin://cassandra"); - try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - resource.registerSink( - tenant, - namespace, - sink, - inputStream, - mockedFormData, - null, - sinkConfig, - null); - } + resource.registerSink( + 
tenant, + namespace, + sink, + null, + mockedFormData, + null, + sinkConfig, + null); } /* @@ -1807,23 +1524,7 @@ public void testRegisterSinkSuccessK8sWithUpload() throws Exception { }); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - ConnectorsManager mockedConnManager = mock(ConnectorsManager.class); - Connector connector = Connector.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedConnManager.getConnector("cassandra")).thenReturn(connector); - when(mockedConnManager.getSinkArchive(any())).thenReturn(getPulsarIOCassandraNar().toPath()); - - when(mockedWorkerService.getConnectorsManager()).thenReturn(mockedConnManager); - + registerBuiltinConnector("cassandra", getPulsarIOCassandraNar()); when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); @@ -1831,21 +1532,84 @@ public void testRegisterSinkSuccessK8sWithUpload() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setArchive("builtin://cassandra"); + try { + resource.registerSink( + tenant, + namespace, + sink, + null, + mockedFormData, + null, + sinkConfig, + null); + Assert.fail(); + } catch (RuntimeException e) { + Assert.assertEquals(e.getMessage(), injectedErrMsg); + } + } + + @Test + public void testUpdateSinkWithNoChange() throws IOException { + mockWorkerUtils(); + + // No change on the config + SinkConfig sinkConfig = createDefaultSinkConfig(); + + mockStatic(SinkConfigUtils.class, ctx -> { + ctx.when(() -> SinkConfigUtils.convertFromDetails(any())).thenReturn(sinkConfig); + }); + + mockFunctionCommon(sinkConfig.getTenant(), sinkConfig.getNamespace(), sinkConfig.getName()); + + // config has no changes and auth data is not updated, so the update should fail + try { + resource.updateSink( + sinkConfig.getTenant(), + sinkConfig.getNamespace(), + sinkConfig.getName(), + null, + mockedFormData, + null, + sinkConfig, + null, + null); + fail("Update without changes should fail"); + } catch (RestException e) { + assertTrue(e.getMessage().contains("Update contains no change")); + } + + try { + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(false); + resource.updateSink( + sinkConfig.getTenant(), + sinkConfig.getNamespace(), + sinkConfig.getName(), + null, + mockedFormData, + null, + sinkConfig, + null, + updateOptions); + fail("Update without changes should fail"); + } catch (RestException e) { + assertTrue(e.getMessage().contains("Update contains no change")); + } + + // no changes, but the auth-update flag is set to true, so the update should not fail + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(true); try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - try { - resource.registerSink( - tenant, - namespace, - sink, - inputStream, - mockedFormData, - null, - sinkConfig, - null); - Assert.fail(); - } catch (RuntimeException e) { - Assert.assertEquals(e.getMessage(), injectedErrMsg); - } + resource.updateSink( + sinkConfig.getTenant(), + sinkConfig.getNamespace(),
sinkConfig.getName(), + inputStream, + mockedFormData, + null, + sinkConfig, + null, + updateOptions); } } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java index 36be2de716661..f02acbd3663bf 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java @@ -18,51 +18,34 @@ */ package org.apache.pulsar.functions.worker.rest.api.v3; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOCassandraNar; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOInvalidNar; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOTwitterNar; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.common.collect.Lists; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; -import java.util.HashMap; +import java.lang.reflect.Method; import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.functions.Utils; import org.apache.pulsar.common.io.SourceConfig; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.api.utils.IdentityFunction; @@ -72,159 +55,43 @@ import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; import org.apache.pulsar.functions.proto.Function.SinkSpec; import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.runtime.RuntimeFactory; import org.apache.pulsar.functions.source.TopicSchema; -import org.apache.pulsar.functions.utils.FunctionCommon; 
import org.apache.pulsar.functions.utils.SourceConfigUtils; import org.apache.pulsar.functions.utils.io.ConnectorUtils; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; import org.apache.pulsar.functions.worker.rest.api.SourcesImpl; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; import org.mockito.MockedStatic; import org.mockito.Mockito; -import org.testng.annotations.AfterClass; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; /** * Unit test of {@link SourcesApiV3Resource}. */ -public class SourceApiV3ResourceTest { +public class SourceApiV3ResourceTest extends AbstractFunctionsResourceTest { - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; private static final String source = "test-source"; private static final String outputTopic = "test-output-topic"; private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; private static final String TWITTER_FIRE_HOSE = "org.apache.pulsar.io.twitter.TwitterFireHose"; - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; private SourcesImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetaData; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - - private static NarClassLoader narClassLoader; - private static Map mockStaticContexts = new HashMap<>(); - - @BeforeClass - public void setupNarClassLoader() throws IOException { - narClassLoader = NarClassLoaderBuilder.builder().narFile(getPulsarIOTwitterNar()).build(); - } - - @AfterClass(alwaysRun = true) - public void cleanupNarClassLoader() throws IOException { - if (narClassLoader != null) { - narClassLoader.close(); - narClassLoader = null; - } - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - 
this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedPackages = mock(Packages.class); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - doAnswer(invocationOnMock -> { - Files.copy(getPulsarIOTwitterNar().toPath(), Paths.get(invocationOnMock.getArgument(1, String.class)), - StandardCopyOption.REPLACE_EXISTING); - return null; - }).when(mockedPackages).download(any(), any()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + @Override + protected void doSetup() { this.resource = spy(new SourcesImpl(() -> mockedWorkerService)); } - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().endsWith("UploadFailure") || method.getName().contains("BKPackage")) { + workerConfig.setFunctionsWorkerEnablePackageManagement(false); } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); } - private void mockWorkerUtils() { - mockStatic(WorkerUtils.class, - ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); + @Override + protected FunctionDetails.ComponentType getComponentType() { + return FunctionDetails.ComponentType.SOURCE; } // @@ -294,8 +161,8 @@ public void testRegisterSourceMissingSourceName() { } } - @Test(expectedExceptions = 
RestException.class, expectedExceptionsMessageRegExp = "Source class UnknownClass must" - + " be in class path") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source class UnknownClass not " + + "found in class loader") public void testRegisterSourceWrongClassName() { try { testRegisterSourceMissingArguments( @@ -358,10 +225,8 @@ public void testRegisterSourceMissingPackageDetails() throws IOException { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package does not have the" - + " correct format. Pulsar cannot determine if the package is a NAR package" - + " or JAR package. Source classname is not provided and attempts to load it as a NAR package " - + "produced the following error.") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package doesn't contain" + + " the META-INF/services/pulsar-io.yaml file.") public void testRegisterSourceMissingPackageDetailsAndClassname() { try { testRegisterSourceMissingArguments( @@ -382,8 +247,8 @@ public void testRegisterSourceMissingPackageDetailsAndClassname() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Failed to extract source class" - + " from archive") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package doesn't contain" + + " the META-INF/services/pulsar-io.yaml file.") public void testRegisterSourceInvalidJarWithNoSource() throws IOException { try (InputStream inputStream = new FileInputStream(getPulsarIOInvalidNar())) { testRegisterSourceMissingArguments( @@ -521,7 +386,7 @@ public void testUpdateMissingSinkConfig() { } private void registerDefaultSource() throws IOException { - registerDefaultSourceWithPackageUrl("source://public/default/test@v1"); + registerDefaultSourceWithPackageUrl(getPulsarIOTwitterNar().toURI().toString()); } private void registerDefaultSourceWithPackageUrl(String packageUrl) throws IOException { @@ -562,8 +427,6 @@ public void testRegisterSourceUploadFailure() throws Exception { any(File.class), any(Namespace.class))) .thenThrow(new IOException("upload failure")); - - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); }); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(false); @@ -590,7 +453,7 @@ public void testRegisterSourceSuccess() throws Exception { @Test(timeOut = 20000) public void testRegisterSourceSuccessWithPackageName() throws IOException { - registerDefaultSourceWithPackageUrl("source://public/default/test@v1"); + registerDefaultSource(); } @Test(timeOut = 20000) @@ -618,14 +481,7 @@ public void testRegisterSourceConflictingFields() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); - sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); + SourceConfig sourceConfig = createDefaultSourceConfig(); try (InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { resource.registerSource( actualTenant, @@ -812,38 +668,106 @@ public void 
testUpdateSourceChangedParallelism() throws Exception { try { mockWorkerUtils(); + try(FileInputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + testUpdateSourceMissingArguments( + tenant, + namespace, + source, + inputStream, + mockedFormData, + outputTopic, + outputSerdeClassName, + TWITTER_FIRE_HOSE, + parallelism + 1, + null); + } + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testUpdateSourceChangedTopic() throws Exception { + mockWorkerUtils(); + + try(FileInputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { testUpdateSourceMissingArguments( tenant, namespace, source, - null, + inputStream, mockedFormData, - outputTopic, + "DifferentTopic", outputSerdeClassName, TWITTER_FIRE_HOSE, - parallelism + 1, + parallelism, null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; } } @Test - public void testUpdateSourceChangedTopic() throws Exception { + public void testUpdateSourceWithNoChange() throws IOException { mockWorkerUtils(); - testUpdateSourceMissingArguments( - tenant, - namespace, - source, - null, - mockedFormData, - "DifferentTopic", - outputSerdeClassName, - TWITTER_FIRE_HOSE, - parallelism, - null); + // No change on the config + SourceConfig sourceConfig = createDefaultSourceConfig(); + mockStatic(SourceConfigUtils.class, ctx -> { + ctx.when(() -> SourceConfigUtils.convertFromDetails(any())).thenReturn(sourceConfig); + }); + + mockFunctionCommon(sourceConfig.getTenant(), sourceConfig.getNamespace(), sourceConfig.getName()); + + // config has no changes and auth data is not updated, so the update should fail + try { + resource.updateSource( + sourceConfig.getTenant(), + sourceConfig.getNamespace(), + sourceConfig.getName(), + null, + mockedFormData, + null, + sourceConfig, + null, + null); + fail("Update without changes should fail"); + } catch (RestException e) { + assertTrue(e.getMessage().contains("Update contains no change")); + } + + try { + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(false); + resource.updateSource( + sourceConfig.getTenant(), + sourceConfig.getNamespace(), + sourceConfig.getName(), + null, + mockedFormData, + null, + sourceConfig, + null, + updateOptions); + fail("Update without changes should fail"); + } catch (RestException e) { + assertTrue(e.getMessage().contains("Update contains no change")); + } + + // no changes, but the auth-update flag is set to true, so the update should not fail + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(true); + try (InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + resource.updateSource( + sourceConfig.getTenant(), + sourceConfig.getNamespace(), + sourceConfig.getName(), + inputStream, + mockedFormData, + null, + sourceConfig, + null, + updateOptions); + } } @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source parallelism must be a " @@ -881,26 +805,7 @@ private void testUpdateSourceMissingArguments( Integer parallelism, String expectedError) throws Exception { - mockStatic(ConnectorUtils.class, c -> { - }); - mockStatic(ClassLoaderUtils.class, c -> { - }); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); -
ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - - - }); - - this.mockedFunctionMetaData = - FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + mockFunctionCommon(tenant, namespace, function); SourceConfig sourceConfig = new SourceConfig(); if (tenant != null) { @@ -942,50 +847,39 @@ private void testUpdateSourceMissingArguments( } - private void updateDefaultSource() throws Exception { - updateDefaultSourceWithPackageUrl(null); - } - - private void updateDefaultSourceWithPackageUrl(String packageUrl) throws Exception { - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); - sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); - + private void mockFunctionCommon(String tenant, String namespace, String function) { mockStatic(ConnectorUtils.class, c -> { }); - mockStatic(ClassLoaderUtils.class, c -> { }); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - }); + this.mockedFunctionMetaData = + FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + } + + private void updateDefaultSource() throws Exception { + updateDefaultSourceWithPackageUrl(getPulsarIOTwitterNar().toURI().toString()); + } + + private void updateDefaultSourceWithPackageUrl(String packageUrl) throws Exception { + SourceConfig sourceConfig = createDefaultSourceConfig(); this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); - try (InputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - resource.updateSource( - tenant, - namespace, - source, - inputStream, - mockedFormData, - packageUrl, - sourceConfig, - null, null); - } + resource.updateSource( + tenant, + namespace, + source, + null, + mockedFormData, + packageUrl, + sourceConfig, + null, null); } @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source test-source doesn't " + @@ -1008,11 +902,25 @@ public void testUpdateSourceUploadFailure() throws Exception { anyString(), any(File.class), any(Namespace.class))).thenThrow(new IOException("upload failure")); - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); }); 
when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); - updateDefaultSource(); + SourceConfig sourceConfig = createDefaultSourceConfig(); + this.mockedFunctionMetaData = + FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); + + try(InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + resource.updateSource( + tenant, + namespace, + source, + inputStream, + mockedFormData, + null, + sourceConfig, + null, null); + } } catch (RestException re) { assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); throw re; @@ -1032,16 +940,9 @@ public void testUpdateSourceSuccess() throws Exception { public void testUpdateSourceWithUrl() throws Exception { Configurator.setRootLevel(Level.DEBUG); - String filePackageUrl = getPulsarIOCassandraNar().toURI().toString(); + String filePackageUrl = getPulsarIOTwitterNar().toURI().toString(); - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); + SourceConfig sourceConfig = createDefaultSourceConfig(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); mockStatic(ConnectorUtils.class, c -> { @@ -1049,15 +950,6 @@ public void testUpdateSourceWithUrl() throws Exception { mockStatic(ClassLoaderUtils.class, c -> { }); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.extractFileFromPkgURL(any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - }); - this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); diff --git a/pulsar-io/aerospike/pom.xml b/pulsar-io/aerospike/pom.xml index 9f058741cafbd..d29689b52418c 100644 --- a/pulsar-io/aerospike/pom.xml +++ b/pulsar-io/aerospike/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-aerospike Pulsar IO :: Aerospike - - ${project.groupId} pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.aerospike aerospike-client-bc ${aerospike-client.version} + + + org.bouncycastle + * + + + + + org.bouncycastle + bcpkix-jdk18on - - @@ -81,6 +83,4 @@ - - diff --git a/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml b/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-io/alluxio/pom.xml b/pulsar-io/alluxio/pom.xml index b0f6a51b79cf4..f0ddbf427f684 100644 --- 
a/pulsar-io/alluxio/pom.xml +++ b/pulsar-io/alluxio/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.0.0-SNAPSHOT - - - - 2.7.3 - 4.1.11 - 1.37.0 - - - pulsar-io-alluxio - Pulsar IO :: Alluxio - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - test - - - - org.alluxio - alluxio-core-client-fs - ${alluxio.version} - - - grpc-netty - io.grpc - - - - - - org.alluxio - alluxio-minicluster - ${alluxio.version} - test - - - org.glassfish - javax.el - - - grpc-netty - io.grpc - - - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.google.guava - guava - - - - - io.grpc - grpc-netty - ${grpc.version} - - - - io.dropwizard.metrics - metrics-jvm - ${metrics.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + pulsar-io + io.streamnative + 3.0.0-SNAPSHOT + + + 2.7.3 + 4.1.11 + 1.37.0 + + pulsar-io-alluxio + Pulsar IO :: Alluxio + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + test + + + org.alluxio + alluxio-core-client-fs + ${alluxio.version} + + + grpc-netty + io.grpc + + + + + org.alluxio + alluxio-minicluster + ${alluxio.version} + test + + + org.glassfish + javax.el + + + grpc-netty + io.grpc + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.google.guava + guava + + + + io.grpc + grpc-netty + ${grpc.version} + + + io.dropwizard.metrics + metrics-jvm + ${metrics.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/aws/pom.xml b/pulsar-io/aws/pom.xml index 2c5ba9450d258..5d236be733ce0 100644 --- a/pulsar-io/aws/pom.xml +++ b/pulsar-io/aws/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-aws Pulsar IO :: IO AWS - ${project.groupId} pulsar-io-core ${project.version} - com.google.code.gson gson - com.amazonaws aws-java-sdk-sts - software.amazon.awssdk sts 2.10.56 - - + - diff --git a/pulsar-io/aws/src/main/resources/findbugsExclude.xml b/pulsar-io/aws/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-io/aws/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/aws/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-io/batch-data-generator/pom.xml b/pulsar-io/batch-data-generator/pom.xml index 621dd226f0eef..f62ef085aa17d 100644 --- a/pulsar-io/batch-data-generator/pom.xml +++ b/pulsar-io/batch-data-generator/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-batch-data-generator - Pulsar IO :: Batch Data Generator - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-io-batch-discovery-triggerers - ${project.version} - - - - org.springframework - spring-context - ${spring.version} - - - - io.codearte.jfairy - jfairy - 0.5.9 - - - - org.apache.avro - avro - ${avro.version} - - - - ${project.groupId} - pulsar-functions-local-runner-original - ${project.version} - test - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + pulsar-io-batch-data-generator + Pulsar IO :: Batch 
Data Generator + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-io-batch-discovery-triggerers + ${project.version} + + + org.springframework + spring-context + ${spring.version} + + + io.codearte.jfairy + jfairy + 0.5.9 + + + org.apache.avro + avro + ${avro.version} + + + ${project.groupId} + pulsar-functions-local-runner-original + ${project.version} + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml b/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml index 0b1652ef62881..1d0d64fba7e67 100644 --- a/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-batch-discovery-triggerers Pulsar IO :: Batch Discovery Triggerers - ${project.groupId} pulsar-io-core ${project.version} - com.cronutils cron-utils ${cron-utils.version} - org.springframework spring-context ${spring.version} - - @@ -73,5 +66,4 @@ - diff --git a/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml b/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - 4.0.0 - - pulsar-io-canal - Pulsar IO :: Canal - - - 1.1.5 - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - com.alibaba - fastjson - 1.2.83 - - - - org.springframework - spring-core - ${spring.version} - - - org.springframework - spring-aop - ${spring.version} - - - org.springframework - spring-context - ${spring.version} - - - org.springframework - spring-jdbc - ${spring.version} - - - org.springframework - spring-orm - ${spring.version} - - - - com.alibaba.otter - canal.protocol - ${canal.version} - - - ch.qos.logback - * - - - - - com.alibaba.otter - canal.client - ${canal.version} - - - ch.qos.logback - * - - - org.springframework - * - - - - - - org.apache.logging.log4j - log4j-core - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - - \ No newline at end of file + + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + 4.0.0 + pulsar-io-canal + Pulsar IO :: Canal + + 1.1.5 + + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.alibaba + fastjson + 1.2.83 + + + org.springframework + spring-core + ${spring.version} + + + org.springframework + spring-aop + ${spring.version} + + + org.springframework + spring-context + ${spring.version} + + + org.springframework + spring-jdbc + ${spring.version} + + + org.springframework + spring-orm + ${spring.version} + + + com.alibaba.otter + canal.protocol + ${canal.version} + + + ch.qos.logback + * + + + + + com.alibaba.otter + 
canal.client + ${canal.version} + + + ch.qos.logback + * + + + org.springframework + * + + + + + org.apache.logging.log4j + log4j-core + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java index 06c8788d5aea1..7d0cd0305a49e 100644 --- a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java +++ b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java @@ -57,7 +57,7 @@ public abstract class CanalAbstractSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - canalSourceConfig = CanalSourceConfig.load(config); + canalSourceConfig = CanalSourceConfig.load(config, sourceContext); if (canalSourceConfig.getCluster()) { connector = CanalConnectors.newClusterConnector(canalSourceConfig.getZkServers(), canalSourceConfig.getDestination(), canalSourceConfig.getUsername(), diff --git a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java index a0408e60e5f76..5a754988ffdc1 100644 --- a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java +++ b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @@ -86,8 +88,7 @@ public static CanalSourceConfig load(String yamlFile) throws IOException { } - public static CanalSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), CanalSourceConfig.class); + public static CanalSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, CanalSourceConfig.class, sourceContext); } } diff --git a/pulsar-io/cassandra/pom.xml b/pulsar-io/cassandra/pom.xml index 4d9296ea2b35c..1ca3a66ab37d2 100644 --- a/pulsar-io/cassandra/pom.xml +++ b/pulsar-io/cassandra/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-cassandra Pulsar IO :: Cassandra - - ${project.groupId} pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.datastax.cassandra cassandra-driver-core - - diff --git a/pulsar-io/common/pom.xml b/pulsar-io/common/pom.xml index b975d9fef07f6..122527bb4bbb6 100644 --- a/pulsar-io/common/pom.xml +++ b/pulsar-io/common/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-common - Pulsar IO :: IO Common - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + pulsar-io-common + Pulsar IO :: IO Common + + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + 
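
The CanalAbstractSource/CanalSourceConfig hunks above are one instance of a pattern repeated through this diff: config classes gain a load(Map, SourceContext) (or SinkContext) overload that delegates to IOConfigUtils.loadWithSecrets, so fields annotated as sensitive can be pulled from the runtime secret store instead of the plain config map. A hedged sketch with a made-up config class (ExampleSourceConfig and its fields are hypothetical; the mocked getSecret style mirrors the DynamoDB and InfluxDB tests later in this diff):

    import java.util.HashMap;
    import java.util.Map;
    import lombok.Data;
    import org.apache.pulsar.io.common.IOConfigUtils;
    import org.apache.pulsar.io.core.SourceContext;
    import org.apache.pulsar.io.core.annotations.FieldDoc;
    import org.mockito.Mockito;

    // Hypothetical config class, for illustration only.
    @Data
    public class ExampleSourceConfig {
        @FieldDoc(required = true, defaultValue = "", help = "server address")
        private String host;

        @FieldDoc(required = true, defaultValue = "", sensitive = true, help = "server password")
        private String password;

        public static void main(String[] args) throws Exception {
            SourceContext ctx = Mockito.mock(SourceContext.class);
            // sensitive fields are resolved by field name through the runtime secret store
            Mockito.when(ctx.getSecret("password")).thenReturn("s3cret");

            Map<String, Object> config = new HashMap<>();
            config.put("host", "localhost:11111");

            ExampleSourceConfig cfg = IOConfigUtils.loadWithSecrets(config, ExampleSourceConfig.class, ctx);
            System.out.println(cfg.getHost() + " / " + cfg.getPassword());
        }
    }
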
jackson-databind + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java index d15986a897caa..69d981bf68728 100644 --- a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java +++ b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java @@ -77,13 +77,14 @@ private static T loadWithSecrets(Map map, Class clazz, } } configs.computeIfAbsent(field.getName(), key -> { - if (fieldDoc.required()) { - throw new IllegalArgumentException(field.getName() + " cannot be null"); - } + // Use default value if it is not null before checking required String value = fieldDoc.defaultValue(); if (value != null && !value.isEmpty()) { return value; } + if (fieldDoc.required()) { + throw new IllegalArgumentException(field.getName() + " cannot be null"); + } return null; }); } diff --git a/pulsar-io/common/src/main/resources/findbugsExclude.xml b/pulsar-io/common/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/common/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/common/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-core Pulsar IO :: IO - ${project.groupId} @@ -37,7 +35,6 @@ ${project.version} - @@ -59,5 +56,4 @@ - diff --git a/pulsar-io/core/src/main/resources/findbugsExclude.xml b/pulsar-io/core/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/core/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/core/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-data-generator - Pulsar IO :: Data Generator - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-config-validation - ${project.version} - - - - io.codearte.jfairy - jfairy - 0.5.9 - - - - org.apache.avro - avro - ${avro.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + pulsar-io-data-generator + Pulsar IO :: Data Generator + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-config-validation + ${project.version} + + + io.codearte.jfairy + jfairy + 0.5.9 + + + org.apache.avro + avro + ${avro.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/debezium/core/pom.xml b/pulsar-io/debezium/core/pom.xml index 4588019a9aa97..f9790674c81f5 100644 --- a/pulsar-io/debezium/core/pom.xml +++ b/pulsar-io/debezium/core/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.0.0-SNAPSHOT - pulsar-io-debezium-core Pulsar IO :: Debezium :: Core - - ${project.groupId} pulsar-io-core ${project.version} provided - io.debezium debezium-core ${debezium.version} - org.apache.commons commons-lang3 - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-io-kafka-connect-adaptor ${project.version} - org.apache.kafka connect-runtime @@ -71,23 +63,24 @@ org.apache.kafka kafka-log4j-appender + + jose4j + org.bitbucket.b_c + - ${project.groupId} pulsar-broker ${project.version} test - 
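
The IOConfigUtils hunk above changes the precedence inside loadWithSecrets: a non-empty @FieldDoc defaultValue is now applied before the required check, so a required field that ships a usable default no longer fails merely because it is absent from the config map. A simplified standalone view of the per-field resolution after the change (the real code runs this inside loadWithSecrets over the raw config map):

    import java.lang.reflect.Field;
    import java.util.Map;
    import org.apache.pulsar.io.core.annotations.FieldDoc;

    // Resolution order per field: explicit map value > annotated defaultValue > required check.
    static Object resolveField(Map<String, Object> configs, Field field) {
        FieldDoc fieldDoc = field.getDeclaredAnnotation(FieldDoc.class);
        return configs.computeIfAbsent(field.getName(), key -> {
            String value = fieldDoc.defaultValue();
            if (value != null && !value.isEmpty()) {
                return value;                 // a usable default now wins over the required check
            }
            if (fieldDoc.required()) {
                throw new IllegalArgumentException(field.getName() + " cannot be null");
            }
            return null;
        });
    }
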
${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-broker @@ -95,21 +88,17 @@ test test-jar - io.debezium debezium-connector-mysql ${debezium.version} test - ${project.groupId} pulsar-client-original ${project.version} test - - diff --git a/pulsar-io/debezium/mongodb/pom.xml b/pulsar-io/debezium/mongodb/pom.xml index b672d64e6147f..81455d0000bd8 100644 --- a/pulsar-io/debezium/mongodb/pom.xml +++ b/pulsar-io/debezium/mongodb/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io-debezium - 3.0.0-SNAPSHOT - - - pulsar-io-debezium-mongodb - Pulsar IO :: Debezium :: mongodb - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - provided - - - - ${project.groupId} - pulsar-io-debezium-core - ${project.version} - - - - io.debezium - debezium-connector-mongodb - ${debezium.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - + + 4.0.0 + + io.streamnative + pulsar-io-debezium + 3.0.0-SNAPSHOT + + pulsar-io-debezium-mongodb + Pulsar IO :: Debezium :: mongodb + + + ${project.groupId} + pulsar-io-core + ${project.version} + provided + + + ${project.groupId} + pulsar-io-debezium-core + ${project.version} + + + io.debezium + debezium-connector-mongodb + ${debezium.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/debezium/mssql/pom.xml b/pulsar-io/debezium/mssql/pom.xml index 1151940d770f4..3c562c9677c94 100644 --- a/pulsar-io/debezium/mssql/pom.xml +++ b/pulsar-io/debezium/mssql/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.0.0-SNAPSHOT - pulsar-io-debezium-mssql Pulsar IO :: Debezium :: Microsoft SQL - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-sqlserver ${debezium.version} - - @@ -61,5 +54,4 @@ - diff --git a/pulsar-io/debezium/mysql/pom.xml b/pulsar-io/debezium/mysql/pom.xml index 47bb9506cb851..472614768da07 100644 --- a/pulsar-io/debezium/mysql/pom.xml +++ b/pulsar-io/debezium/mysql/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.0.0-SNAPSHOT - pulsar-io-debezium-mysql Pulsar IO :: Debezium :: mysql - ${project.groupId} @@ -37,21 +35,17 @@ ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-mysql ${debezium.version} - - @@ -61,8 +55,6 @@ - - @@ -71,5 +63,4 @@ - diff --git a/pulsar-io/debezium/oracle/pom.xml b/pulsar-io/debezium/oracle/pom.xml index ee158d6fb8414..3b735ebac4964 100644 --- a/pulsar-io/debezium/oracle/pom.xml +++ b/pulsar-io/debezium/oracle/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.0.0-SNAPSHOT - pulsar-io-debezium-oracle Pulsar IO :: Debezium :: oracle - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-oracle ${debezium.version} - - @@ -61,5 +54,4 @@ - diff --git a/pulsar-io/debezium/pom.xml b/pulsar-io/debezium/pom.xml index 050da1af45330..976fc07c68162 100644 --- a/pulsar-io/debezium/pom.xml +++ b/pulsar-io/debezium/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-debezium Pulsar IO :: Debezium - @@ -70,7 +68,6 @@ - core mysql @@ -79,5 +76,4 @@ oracle mssql - diff --git a/pulsar-io/debezium/postgres/pom.xml b/pulsar-io/debezium/postgres/pom.xml index 
0bab38c71ef10..a9f3957a26b08 100644 --- a/pulsar-io/debezium/postgres/pom.xml +++ b/pulsar-io/debezium/postgres/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.0.0-SNAPSHOT - pulsar-io-debezium-postgres Pulsar IO :: Debezium :: postgres - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-postgres ${debezium.version} - org.postgresql postgresql ${debezium.postgresql.version} runtime - - @@ -68,5 +60,4 @@ - diff --git a/pulsar-io/docs/pom.xml b/pulsar-io/docs/pom.xml index 305c7f1473077..603d9d8b6dd8d 100644 --- a/pulsar-io/docs/pom.xml +++ b/pulsar-io/docs/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-docs Pulsar IO :: Docs - - ${project.groupId} pulsar-io-core @@ -45,7 +42,6 @@ com.beust jcommander - ${project.groupId} @@ -218,7 +214,6 @@ ${project.version} - @@ -267,5 +262,4 @@ - diff --git a/pulsar-io/docs/src/main/java/org/apache/pulsar/io/docs/ConnectorDocGenerator.java b/pulsar-io/docs/src/main/java/org/apache/pulsar/io/docs/ConnectorDocGenerator.java index fe12b2b11ce84..fec7b12087977 100644 --- a/pulsar-io/docs/src/main/java/org/apache/pulsar/io/docs/ConnectorDocGenerator.java +++ b/pulsar-io/docs/src/main/java/org/apache/pulsar/io/docs/ConnectorDocGenerator.java @@ -21,6 +21,7 @@ import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.google.common.base.Strings; +import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; @@ -28,11 +29,9 @@ import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.net.URL; -import java.net.URLClassLoader; import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; +import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Set; import lombok.extern.slf4j.Slf4j; @@ -47,21 +46,12 @@ public class ConnectorDocGenerator { private static final String INDENT = " "; private static Reflections newReflections() throws Exception { - List urls = new ArrayList<>(); - ClassLoader[] classLoaders = new ClassLoader[] { - ConnectorDocGenerator.class.getClassLoader(), - Thread.currentThread().getContextClassLoader() - }; - for (int i = 0; i < classLoaders.length; i++) { - if (classLoaders[i] instanceof URLClassLoader) { - urls.addAll(Arrays.asList(((URLClassLoader) classLoaders[i]).getURLs())); - } else { - throw new RuntimeException("ClassLoader '" + classLoaders[i] + " is not an instance of URLClassLoader"); - } + final String[] classpathList = System.getProperty("java.class.path").split(":"); + final List urlList = new ArrayList<>(); + for (String file : classpathList) { + urlList.add(new File(file).toURI().toURL()); } - ConfigurationBuilder confBuilder = new ConfigurationBuilder(); - confBuilder.setUrls(urls); - return new Reflections(confBuilder); + return new Reflections(new ConfigurationBuilder().setUrls(urlList)); } private final Reflections reflections; @@ -70,7 +60,7 @@ public ConnectorDocGenerator() throws Exception { this.reflections = newReflections(); } - private void generateConnectorYaml(Class configClass, PrintWriter writer) { + private void generateConnectorYamlFile(Class configClass, PrintWriter writer) { log.info("Processing connector config class : {}", configClass); writer.println("configs:"); @@ -82,7 +72,9 @@ private void 
generateConnectorYaml(Class configClass, PrintWriter writer) { } FieldDoc fieldDoc = field.getDeclaredAnnotation(FieldDoc.class); if (null == fieldDoc) { - throw new RuntimeException("Missing `FieldDoc` for field '" + field.getName() + "'"); + final String message = "Missing FieldDoc for field '%s' in class '%s'." + .formatted(field.getName(), configClass.getCanonicalName()); + throw new RuntimeException(message); } writer.println(INDENT + "# " + fieldDoc.help()); String fieldPrefix = ""; @@ -99,28 +91,28 @@ private void generateConnectorYaml(Class configClass, PrintWriter writer) { writer.flush(); } - private void generateConnectorYaml(Class connectorClass, Connector connectorDef, PrintWriter writer) { + private void generateConnectorYamlFile(Class connectorClass, Connector connectorDef, PrintWriter writer) { log.info("Processing connector definition : {}", connectorDef); writer.println("# " + connectorDef.type() + " connector : " + connectorClass.getName()); writer.println(); writer.println("# " + connectorDef.help()); writer.println(); - generateConnectorYaml(connectorDef.configClass(), writer); + generateConnectorYamlFile(connectorDef.configClass(), writer); } - private void generatorConnectorYamls(String outputDir) throws IOException { + private void generatorConnectorYamlFiles(String outputDir) throws IOException { Set> connectorClasses = reflections.getTypesAnnotatedWith(Connector.class); log.info("Retrieve all `Connector` annotated classes : {}", connectorClasses); for (Class connectorClass : connectorClasses) { - Connector connectorDef = connectorClass.getDeclaredAnnotation(Connector.class); - try (FileOutputStream fos = new FileOutputStream( - Paths.get( - outputDir, - "pulsar-io-" + connectorDef.name() - + "-" + connectorDef.type().name().toLowerCase()).toString() + ".yml")) { + final Connector connectorDef = connectorClass.getDeclaredAnnotation(Connector.class); + final String name = connectorDef.name().toLowerCase(); + final String type = connectorDef.type().name().toLowerCase(); + final String filename = "pulsar-io-%s-%s.yml".formatted(name, type); + final Path outputPath = Path.of(outputDir, filename); + try (FileOutputStream fos = new FileOutputStream(outputPath.toFile())) { PrintWriter pw = new PrintWriter(new OutputStreamWriter(fos, StandardCharsets.UTF_8)); - generateConnectorYaml(connectorClass, connectorDef, pw); + generateConnectorYamlFile(connectorClass, connectorDef, pw); pw.flush(); } } @@ -130,23 +122,14 @@ private void generatorConnectorYamls(String outputDir) throws IOException { * Args for stats generator. 
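
The newReflections() rewrite above drops the URLClassLoader cast, which breaks on JDK 9+ where the application class loader is no longer a URLClassLoader, and instead feeds Reflections every entry of java.class.path. A standalone sketch of that discovery step (File.pathSeparator is used here for portability; the committed code splits on ':'):

    import java.io.File;
    import java.net.URL;
    import java.util.ArrayList;
    import java.util.List;

    static List<URL> classpathScanUrls() throws Exception {
        final List<URL> urls = new ArrayList<>();
        for (String entry : System.getProperty("java.class.path").split(File.pathSeparator)) {
            // each classpath entry (jar or directory) becomes a scan URL for Reflections
            urls.add(new File(entry).toURI().toURL());
        }
        return urls;
    }
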
*/ private static class MainArgs { - @Parameter( - names = { - "-o", "--output-dir" - }, - description = "The output dir to dump connector docs", - required = true - ) + names = {"-o", "--output-dir"}, + description = "The output dir to dump connector docs", + required = true) String outputDir = null; - @Parameter( - names = { - "-h", "--help" - }, - description = "Show this help message") + @Parameter(names = {"-h", "--help"}, description = "Show this help message") boolean help = false; - } public static void main(String[] args) throws Exception { @@ -169,7 +152,7 @@ public static void main(String[] args) throws Exception { } ConnectorDocGenerator docGen = new ConnectorDocGenerator(); - docGen.generatorConnectorYamls(mainArgs.outputDir); + docGen.generatorConnectorYamlFiles(mainArgs.outputDir); } } diff --git a/pulsar-io/docs/src/main/resources/findbugsExclude.xml b/pulsar-io/docs/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/docs/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/docs/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-dynamodb Pulsar IO :: DynamoDB - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-instance ${project.version} provided - ${project.groupId} pulsar-io-aws ${project.version} - org.apache.commons commons-lang3 - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.dataformat jackson-dataformat-cbor - com.google.code.gson gson - com.amazonaws aws-java-sdk-core - com.amazonaws @@ -90,9 +83,7 @@ 1.5.1 - - @@ -101,5 +92,4 @@ - diff --git a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java index d67c4e21154ee..2193cf39c17a5 100644 --- a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java +++ b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java @@ -65,7 +65,7 @@ public void close() throws Exception { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - this.dynamodbSourceConfig = DynamoDBSourceConfig.load(config); + this.dynamodbSourceConfig = DynamoDBSourceConfig.load(config, sourceContext); checkArgument(isNotBlank(dynamodbSourceConfig.getAwsDynamodbStreamArn()), "empty dynamo-stream arn"); // Even if the endpoint is set, it seems to require a region to go with it diff --git a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java index b734dd5741155..0547ff8f863e0 100644 --- a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java +++ b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java @@ -35,6 +35,8 @@ import java.util.Map; import lombok.Data; import org.apache.pulsar.io.aws.AwsCredentialProviderPlugin; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; import software.amazon.awssdk.regions.Region; @@ -77,6 +79,7 @@ public class DynamoDBSourceConfig implements Serializable { @FieldDoc( required = false, defaultValue = "", + 
sensitive = true, help = "json-parameters to initialize `AwsCredentialsProviderPlugin`") private String awsCredentialPluginParam = ""; @@ -170,9 +173,8 @@ public static DynamoDBSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), DynamoDBSourceConfig.class); } - public static DynamoDBSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), DynamoDBSourceConfig.class); + public static DynamoDBSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, DynamoDBSourceConfig.class, sourceContext); } protected Region regionAsV2Region() { diff --git a/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java b/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java index f84cb785896e6..bdccaa2e5846e 100644 --- a/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java +++ b/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java @@ -31,6 +31,8 @@ import java.util.Map; import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream; +import org.apache.pulsar.io.core.SourceContext; +import org.mockito.Mockito; import org.testng.annotations.Test; @@ -90,7 +92,8 @@ public final void loadFromMapTest() throws IOException { map.put("initialPositionInStream", InitialPositionInStream.TRIM_HORIZON); map.put("startAtTime", DAY); - DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map, sourceContext); assertNotNull(config); assertEquals(config.getAwsEndpoint(), "https://some.endpoint.aws"); @@ -111,7 +114,46 @@ public final void loadFromMapTest() throws IOException { ZonedDateTime expected = ZonedDateTime.ofInstant(DAY.toInstant(), ZoneOffset.UTC); assertEquals(actual, expected); } - + + @Test + public final void loadFromMapCredentialFromSecretTest() throws IOException { + Map map = new HashMap (); + map.put("awsEndpoint", "https://some.endpoint.aws"); + map.put("awsRegion", "us-east-1"); + map.put("awsDynamodbStreamArn", "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); + map.put("checkpointInterval", "30000"); + map.put("backoffTime", "4000"); + map.put("numRetries", "3"); + map.put("receiveQueueSize", 2000); + map.put("applicationName", "My test application"); + map.put("initialPositionInStream", InitialPositionInStream.TRIM_HORIZON); + map.put("startAtTime", DAY); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("awsCredentialPluginParam")) + .thenReturn("{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}"); + DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map, sourceContext); + + assertNotNull(config); + assertEquals(config.getAwsEndpoint(), "https://some.endpoint.aws"); + assertEquals(config.getAwsRegion(), "us-east-1"); + assertEquals(config.getAwsDynamodbStreamArn(), "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); + assertEquals(config.getAwsCredentialPluginParam(), + "{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}"); + assertEquals(config.getApplicationName(), "My test application"); + assertEquals(config.getCheckpointInterval(), 30000); + 
assertEquals(config.getBackoffTime(), 4000); + assertEquals(config.getNumRetries(), 3); + assertEquals(config.getReceiveQueueSize(), 2000); + assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); + + Calendar cal = Calendar.getInstance(); + cal.setTime(config.getStartAtTime()); + ZonedDateTime actual = ZonedDateTime.ofInstant(cal.toInstant(), ZoneOffset.UTC); + ZonedDateTime expected = ZonedDateTime.ofInstant(DAY.toInstant(), ZoneOffset.UTC); + assertEquals(actual, expected); + } + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "empty aws-credential param") public final void missingCredentialsTest() throws Exception { @@ -121,7 +163,8 @@ public final void missingCredentialsTest() throws Exception { map.put("awsDynamodbStreamArn", "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); DynamoDBSource source = new DynamoDBSource(); - source.open(map, null); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + source.open(map, sourceContext); } @Test(expectedExceptions = IllegalArgumentException.class, @@ -136,7 +179,8 @@ public final void missingStartTimeTest() throws Exception { map.put("initialPositionInStream", InitialPositionInStream.AT_TIMESTAMP); DynamoDBSource source = new DynamoDBSource(); - source.open(map, null); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + source.open(map, sourceContext); } private File getFile(String name) { diff --git a/pulsar-io/elastic-search/pom.xml b/pulsar-io/elastic-search/pom.xml index f9f87e5bd02b2..9dbd0b4078d36 100644 --- a/pulsar-io/elastic-search/pom.xml +++ b/pulsar-io/elastic-search/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-flume - Pulsar IO :: Flume - - - 1.8.2 - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - org.apache.flume - flume-ng-node - 1.9.0 - pom - - - org.apache.avro - avro-ipc - - - avro - org.apache.avro - - - - - org.apache.avro - avro - ${avro.version} - - - commons-lang - commons-lang - 2.6 - - - org.apache.avro - avro-ipc - ${avro.version} - - - org.mortbay.jetty - servlet-api - - - io.netty - netty - - - - - org.apache.curator - curator-framework - ${curator.version} - - - org.apache.curator - curator-test - ${curator.version} - test - - - - io.dropwizard.metrics - metrics-core - test - - - - org.xerial.snappy - snappy-java - test - - - - com.google.guava - guava - ${guava.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - + + io.dropwizard.metrics + metrics-core + test + + + + org.xerial.snappy + snappy-java + test + + + com.google.guava + guava + ${guava.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + + - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + diff --git a/pulsar-io/hbase/pom.xml b/pulsar-io/hbase/pom.xml index 33396394de4a4..dfd65c0b0b71c 100644 --- a/pulsar-io/hbase/pom.xml +++ b/pulsar-io/hbase/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.0.0-SNAPSHOT - - pulsar-io-hbase - Pulsar IO :: Hbase - - - - ${project.groupId} - pulsar-io-core - 
${project.version} - - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.google.guava - guava - - - - org.apache.hbase - hbase-client - ${hbase.version} - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - - - - - org.apache.hbase - hbase-common - ${hbase.version} - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + diff --git a/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml b/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml index c2b520a765643..460cbd497c15f 100644 --- a/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml +++ b/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml @@ -1,3 +1,4 @@ + - - hbase.cluster.distributed - true - - - hbase.rootdir - hdfs://localhost:8020/hbase - - - hbase.zookeeper.quorum - localhost - - - hbase.zookeeper.property.clientPort - 2181 - - - zookeeper.znode.parent - /hbase - + + hbase.cluster.distributed + true + + + hbase.rootdir + hdfs://localhost:8020/hbase + + + hbase.zookeeper.quorum + localhost + + + hbase.zookeeper.property.clientPort + 2181 + + + zookeeper.znode.parent + /hbase + diff --git a/pulsar-io/hdfs2/pom.xml b/pulsar-io/hdfs2/pom.xml index 1f43b31a0aa6a..56d5e2fc6c195 100644 --- a/pulsar-io/hdfs2/pom.xml +++ b/pulsar-io/hdfs2/pom.xml @@ -1,3 +1,4 @@ + - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - - \ No newline at end of file + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + + diff --git a/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml b/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml index 980349c4d8d5e..93756f7f28461 100644 --- a/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - fs.defaultFS - hdfs://0.0.0.0:8020 - - - io.compression.codecs - org.apache.hadoop.io.compress.GzipCodec, + + fs.defaultFS + hdfs://0.0.0.0:8020 + + + io.compression.codecs + org.apache.hadoop.io.compress.GzipCodec, org.apache.hadoop.io.compress.DefaultCodec, org.apache.hadoop.io.compress.SnappyCodec - + diff --git a/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml b/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml index bb722f1f63470..1b35ebd585012 100644 --- a/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml +++ b/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml @@ -1,3 +1,4 @@ + - - dfs.replication - 1 - - - dfs.client.use.datanode.hostname - true - - - dfs.support.append - true - + + dfs.replication + 1 + + + dfs.client.use.datanode.hostname + true + + + dfs.support.append + true + diff --git a/pulsar-io/hdfs3/pom.xml b/pulsar-io/hdfs3/pom.xml index 3fbc030184c5c..5104424dbdafc 100644 --- a/pulsar-io/hdfs3/pom.xml +++ b/pulsar-io/hdfs3/pom.xml @@ -1,3 +1,4 @@ + - - fs.defaultFS - hdfs://0.0.0.0:8020 - - - io.compression.codecs - org.apache.hadoop.io.compress.GzipCodec, + + 
fs.defaultFS + hdfs://0.0.0.0:8020 + + + io.compression.codecs + org.apache.hadoop.io.compress.GzipCodec, org.apache.hadoop.io.compress.DefaultCodec, org.apache.hadoop.io.compress.SnappyCodec - + diff --git a/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml b/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml index bb722f1f63470..1b35ebd585012 100644 --- a/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml +++ b/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml @@ -1,3 +1,4 @@ + - - dfs.replication - 1 - - - dfs.client.use.datanode.hostname - true - - - dfs.support.append - true - + + dfs.replication + 1 + + + dfs.client.use.datanode.hostname + true + + + dfs.support.append + true + diff --git a/pulsar-io/http/pom.xml b/pulsar-io/http/pom.xml index d86201a67b30f..9525885f26461 100644 --- a/pulsar-io/http/pom.xml +++ b/pulsar-io/http/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-http - Pulsar IO :: HTTP - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - - org.apache.avro - avro - ${avro.version} - - - - org.apache.pulsar - pulsar-client-original - ${project.version} - test - - - - com.github.tomakehurst - wiremock-jre8 - ${wiremock.version} - test - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + pulsar-io-http + Pulsar IO :: HTTP + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + org.apache.avro + avro + ${avro.version} + + + io.streamnative + pulsar-client-original + ${project.version} + test + + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/influxdb/pom.xml b/pulsar-io/influxdb/pom.xml index 91571cf66e803..b436d7b988881 100644 --- a/pulsar-io/influxdb/pom.xml +++ b/pulsar-io/influxdb/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.0.0-SNAPSHOT - - - pulsar-io-influxdb - Pulsar IO :: InfluxDB - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - - com.influxdb - influxdb-client-java - 4.0.0 - - - - org.influxdb - influxdb-java - 2.22 - - - com.squareup.okhttp3 - * - - - - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.0.0-SNAPSHOT + + pulsar-io-influxdb + Pulsar IO :: InfluxDB + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + com.influxdb + influxdb-client-java + 4.0.0 + + + org.influxdb + influxdb-java + 2.22 + + + com.squareup.okhttp3 + * + + + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + + + + 
org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java index 5b51461fc7b8e..0d431f84c52f2 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java @@ -46,12 +46,12 @@ public class InfluxDBGenericRecordSink implements Sink { @Override public void open(Map map, SinkContext sinkContext) throws Exception { try { - val configV2 = InfluxDBSinkConfig.load(map); + val configV2 = InfluxDBSinkConfig.load(map, sinkContext); configV2.validate(); sink = new InfluxDBSink(); } catch (Exception e) { try { - val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map); + val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map, sinkContext); configV1.validate(); sink = new org.apache.pulsar.io.influxdb.v1.InfluxDBGenericRecordSink(); } catch (Exception e1) { diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java index 06856bad80edc..217c5304b24f7 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java @@ -43,7 +43,7 @@ public abstract class InfluxDBAbstractSink extends BatchSink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config); + InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config, sinkContext); influxDBSinkConfig.validate(); super.init(influxDBSinkConfig.getBatchTimeMs(), influxDBSinkConfig.getBatchSize()); diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java index 9b7d8e1ce905d..4ae2cf1e4a3a1 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -94,7 +96,7 @@ public class InfluxDBSinkConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "1000L", + defaultValue = "1000", help = "The InfluxDB operation time in milliseconds") private long batchTimeMs = 1000L; @@ -110,14 +112,11 @@ public static InfluxDBSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); } - public static InfluxDBSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), InfluxDBSinkConfig.class); + public static InfluxDBSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, InfluxDBSinkConfig.class, sinkContext); } public void validate() { - 
Preconditions.checkNotNull(influxdbUrl, "influxdbUrl property not set."); - Preconditions.checkNotNull(database, "database property not set."); Preconditions.checkArgument(batchSize > 0, "batchSize must be a positive integer."); Preconditions.checkArgument(batchTimeMs > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java index 08f1ab2339992..0aa43570596af 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java @@ -49,7 +49,7 @@ public class InfluxDBSink extends BatchSink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config); + InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config, sinkContext); influxDBSinkConfig.validate(); super.init(influxDBSinkConfig.getBatchTimeMs(), influxDBSinkConfig.getBatchSize()); diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java index 899b00c002155..ea87ee66b90a3 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -87,7 +89,7 @@ public class InfluxDBSinkConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "1000L", + defaultValue = "1000", help = "The InfluxDB operation time in milliseconds") private long batchTimeMs = 1000; @@ -103,17 +105,11 @@ public static InfluxDBSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); } - public static InfluxDBSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), InfluxDBSinkConfig.class); + public static InfluxDBSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, InfluxDBSinkConfig.class, sinkContext); } public void validate() { - Preconditions.checkNotNull(influxdbUrl, "influxdbUrl property not set."); - Preconditions.checkNotNull(token, "token property not set."); - Preconditions.checkNotNull(organization, "organization property not set."); - Preconditions.checkNotNull(bucket, "bucket property not set."); - Preconditions.checkArgument(batchSize > 0, "batchSize must be a positive integer."); Preconditions.checkArgument(batchTimeMs > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java index 4493dcfb24854..10b1bfb624f49 100644 --- a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java +++ b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java 
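
Two related effects of moving the InfluxDB configs onto loadWithSecrets show up above and in the v1/v2 test changes that follow: the null checks previously done in validate() are now enforced through the @FieldDoc required flag (hence the expected exception changes from NullPointerException "influxdbUrl property not set." to IllegalArgumentException "influxdbUrl cannot be null"), and the annotated defaults must be plain literals that convert into the field type, which is presumably why "1000L" became "1000" for the long batchTimeMs field. A minimal check of that parsing assumption:

    public class DefaultValueParseCheck {
        public static void main(String[] args) {
            System.out.println(Long.parseLong("1000"));    // fine: 1000
            try {
                Long.parseLong("1000L");                    // the old annotated default
            } catch (NumberFormatException e) {
                System.out.println("\"1000L\" is not a parseable long: " + e.getMessage());
            }
        }
    }
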
@@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.influxdb.v1; +import org.apache.pulsar.io.core.SinkContext; import org.influxdb.InfluxDB; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -60,8 +62,11 @@ public final void loadFromMapTest() throws IOException { map.put("gzipEnable", "false"); map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); + map.put("username", "admin"); + map.put("password", "admin"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals("http://localhost:8086", config.getInfluxdbUrl()); assertEquals("test_db", config.getDatabase()); @@ -71,6 +76,39 @@ public final void loadFromMapTest() throws IOException { assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); assertEquals(Integer.parseInt("100"), config.getBatchSize()); + assertEquals("admin", config.getUsername()); + assertEquals("admin", config.getPassword()); + } + + @Test + public final void loadFromMapCredentialFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("influxdbUrl", "http://localhost:8086"); + map.put("database", "test_db"); + map.put("consistencyLevel", "ONE"); + map.put("logLevel", "NONE"); + map.put("retentionPolicy", "autogen"); + map.put("gzipEnable", "false"); + map.put("batchTimeMs", "1000"); + map.put("batchSize", "100"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("username")) + .thenReturn("admin"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("admin"); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals("http://localhost:8086", config.getInfluxdbUrl()); + assertEquals("test_db", config.getDatabase()); + assertEquals("ONE", config.getConsistencyLevel()); + assertEquals("NONE", config.getLogLevel()); + assertEquals("autogen", config.getRetentionPolicy()); + assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); + assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); + assertEquals(Integer.parseInt("100"), config.getBatchSize()); + assertEquals("admin", config.getUsername()); + assertEquals("admin", config.getPassword()); } @Test @@ -85,12 +123,13 @@ public final void validValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "influxdbUrl property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "influxdbUrl cannot be null") public final void missingInfluxdbUrlValidateTest() throws IOException { Map map = new HashMap<>(); map.put("database", "test_db"); @@ -101,7 +140,8 @@ public final void missingInfluxdbUrlValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } 
@@ -118,7 +158,8 @@ public final void invalidBatchSizeTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "-100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } @@ -135,7 +176,8 @@ public final void invalidConsistencyLevelTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); InfluxDB.ConsistencyLevel.valueOf(config.getConsistencyLevel().toUpperCase()); diff --git a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java index df1f7fd29a637..d6cee1e308d2b 100644 --- a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java +++ b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java @@ -24,6 +24,8 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; public class InfluxDBSinkConfigTest { @@ -58,18 +60,34 @@ private Map buildValidConfigMap() { public final void testLoadFromMap() throws Exception { Map map = buildValidConfigMap(); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); assertNotNull(config); config.validate(); verifyValues(config); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "influxdbUrl property not set.") + @Test + public final void testLoadFromMapCredentialFromSecret() throws Exception { + Map map = buildValidConfigMap(); + map.remove("token"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("token")) + .thenReturn("xxxx"); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); + assertNotNull(config); + config.validate(); + verifyValues(config); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "influxdbUrl cannot be null") public void testRequiredConfigMissing() throws Exception { Map map = buildValidConfigMap(); map.remove("influxdbUrl"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } @@ -78,7 +96,8 @@ public void testRequiredConfigMissing() throws Exception { public void testBatchConfig() throws Exception { Map map = buildValidConfigMap(); map.put("batchSize", -1); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } diff --git a/pulsar-io/jdbc/clickhouse/pom.xml b/pulsar-io/jdbc/clickhouse/pom.xml index 89ac29d72653a..cbf39587e9cb6 100644 --- a/pulsar-io/jdbc/clickhouse/pom.xml +++ b/pulsar-io/jdbc/clickhouse/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - 
org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-clickhouse Pulsar IO :: Jdbc :: ClickHouse - ${project.groupId} @@ -45,13 +42,12 @@ all - * - * + * + * - diff --git a/pulsar-io/jdbc/core/pom.xml b/pulsar-io/jdbc/core/pom.xml index f9dd1e0e96346..034b51fedd8f9 100644 --- a/pulsar-io/jdbc/core/pom.xml +++ b/pulsar-io/jdbc/core/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-core Pulsar IO :: Jdbc :: Core - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} - org.apache.avro avro ${avro.version} - com.fasterxml.jackson.core jackson-databind - com.google.guava guava - com.fasterxml.jackson.dataformat jackson-dataformat-yaml + + com.google.protobuf + protobuf-java + provided + - - \ No newline at end of file + diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/BaseJdbcAutoSchemaSink.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/BaseJdbcAutoSchemaSink.java index 1c98069403c52..36c3674091932 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/BaseJdbcAutoSchemaSink.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/BaseJdbcAutoSchemaSink.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import java.sql.PreparedStatement; import java.util.ArrayList; import java.util.HashMap; @@ -185,8 +186,10 @@ private static void setColumnValue(PreparedStatement statement, int index, Objec statement.setString(index, (String) value); } else if (value instanceof Short) { statement.setShort(index, (Short) value); + } else if (value instanceof ByteString) { + statement.setBytes(index, ((ByteString) value).toByteArray()); } else { - throw new Exception("Not support value type, need to add it. " + value.getClass()); + throw new Exception("Not supported value type, need to add it. 
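
The BaseJdbcAutoSchemaSink hunk above adds a branch for protobuf ByteString values (matching the new provided protobuf-java dependency in pulsar-io-jdbc-core), binding them as raw JDBC byte arrays. A reduced sketch showing just the new case and the fallback:

    import com.google.protobuf.ByteString;
    import java.sql.PreparedStatement;

    static void setColumnValue(PreparedStatement statement, int index, Object value) throws Exception {
        if (value instanceof ByteString) {
            // protobuf byte strings are written as plain byte[] parameters
            statement.setBytes(index, ((ByteString) value).toByteArray());
        } else {
            throw new Exception("Not supported value type, need to add it. " + value.getClass());
        }
    }
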
" + value.getClass()); } } diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java index 4586fcebcf167..ca33b3cfdaba9 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java @@ -76,7 +76,7 @@ public abstract class JdbcAbstractSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - jdbcSinkConfig = JdbcSinkConfig.load(config); + jdbcSinkConfig = JdbcSinkConfig.load(config, sinkContext); jdbcSinkConfig.validate(); jdbcUrl = jdbcSinkConfig.getJdbcUrl(); diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java index f798d94f7c35e..854d68381312c 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -145,9 +147,8 @@ public static JdbcSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), JdbcSinkConfig.class); } - public static JdbcSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), JdbcSinkConfig.class); + public static JdbcSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, JdbcSinkConfig.class, sinkContext); } public void validate() { diff --git a/pulsar-io/jdbc/mariadb/pom.xml b/pulsar-io/jdbc/mariadb/pom.xml index 7e96a2901456f..737fdd0704c32 100644 --- a/pulsar-io/jdbc/mariadb/pom.xml +++ b/pulsar-io/jdbc/mariadb/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-mariadb Pulsar IO :: Jdbc :: Mariadb - ${project.groupId} @@ -52,4 +49,4 @@ - \ No newline at end of file + diff --git a/pulsar-io/jdbc/openmldb/pom.xml b/pulsar-io/jdbc/openmldb/pom.xml index 1b2bab67d04ab..09d36b016f732 100644 --- a/pulsar-io/jdbc/openmldb/pom.xml +++ b/pulsar-io/jdbc/openmldb/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-openmldb Pulsar IO :: Jdbc :: OpenMLDB - ${project.groupId} @@ -60,7 +57,6 @@ runtime - diff --git a/pulsar-io/jdbc/pom.xml b/pulsar-io/jdbc/pom.xml index 3f841ab183399..0e2b8d229867f 100644 --- a/pulsar-io/jdbc/pom.xml +++ b/pulsar-io/jdbc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom @@ -31,12 +31,10 @@ openmldb - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-jdbc Pulsar IO :: Jdbc - diff --git a/pulsar-io/jdbc/postgres/pom.xml b/pulsar-io/jdbc/postgres/pom.xml index 3ae7eaf93bb7c..a6ae052e42a07 100644 --- a/pulsar-io/jdbc/postgres/pom.xml +++ b/pulsar-io/jdbc/postgres/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-postgres Pulsar IO :: Jdbc :: Postgres - ${project.groupId} @@ -44,7 +41,6 @@ runtime - diff --git a/pulsar-io/jdbc/sqlite/pom.xml 
b/pulsar-io/jdbc/sqlite/pom.xml index 6c688af0ae15e..f920de0adcc67 100644 --- a/pulsar-io/jdbc/sqlite/pom.xml +++ b/pulsar-io/jdbc/sqlite/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.0.0-SNAPSHOT 4.0.0 pulsar-io-jdbc-sqlite Pulsar IO :: Jdbc :: Sqlite - ${project.groupId} @@ -60,7 +58,6 @@ test - @@ -69,4 +66,4 @@ - \ No newline at end of file + diff --git a/pulsar-io/kafka-connect-adaptor-nar/pom.xml b/pulsar-io/kafka-connect-adaptor-nar/pom.xml index bd742f0f20332..5b9f8cbdc99a0 100644 --- a/pulsar-io/kafka-connect-adaptor-nar/pom.xml +++ b/pulsar-io/kafka-connect-adaptor-nar/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-kafka-connect-adaptor-nar Pulsar IO :: Kafka Connect Adaptor NAR - ${project.groupId} @@ -37,8 +35,6 @@ ${project.version} - - @@ -48,7 +44,30 @@ pulsar-io-kafka-connect-adaptor-${project.version} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - diff --git a/pulsar-io/kafka-connect-adaptor/pom.xml b/pulsar-io/kafka-connect-adaptor/pom.xml index 12ebda087eb3d..35cd8318836e8 100644 --- a/pulsar-io/kafka-connect-adaptor/pom.xml +++ b/pulsar-io/kafka-connect-adaptor/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-kafka-connect-adaptor Pulsar IO :: Kafka Connect Adaptor - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-common ${project.version} compile - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - org.apache.kafka connect-runtime @@ -73,21 +66,34 @@ org.eclipse.jetty * + + jose4j + org.bitbucket.b_c + - org.apache.kafka connect-json ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - org.apache.kafka connect-api ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - ${project.groupId} @@ -100,45 +106,50 @@ - io.netty netty-buffer - org.apache.commons commons-lang3 - io.confluent kafka-connect-avro-converter ${confluent.version} + + + org.apache.avro + avro + + - ${project.groupId} pulsar-broker ${project.version} test - org.apache.kafka connect-file ${kafka-client.version} test + + + jose4j + org.bitbucket.b_c + + - ${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-broker @@ -146,31 +157,37 @@ test test-jar - + + org.bouncycastle + bc-fips + ${bouncycastle.bc-fips.version} + test + org.apache.avro avro ${avro.version} provided - com.google.protobuf protobuf-java provided - org.asynchttpclient async-http-client test - com.typesafe.netty netty-reactive-streams test + + com.google.protobuf + protobuf-java + 3.5.1 + - diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java index f5f6efd08bd99..f2ee8a8e6cafe 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java @@ -27,6 +27,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.TopicPartition; import 
org.apache.kafka.connect.json.JsonConverterConfig; import org.apache.kafka.connect.source.SourceRecord; import org.apache.pulsar.client.api.Schema; @@ -57,6 +59,7 @@ public void open(Map config, SourceContext sourceContext) throws config.put(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, false); } log.info("jsonWithEnvelope: {}", jsonWithEnvelope); + super.open(config, sourceContext); } @@ -69,17 +72,26 @@ public synchronized KafkaSourceRecord processSourceRecord(final SourceRecord src private static final AvroData avroData = new AvroData(1000); - private class KafkaSourceRecord extends AbstractKafkaSourceRecord> + public class KafkaSourceRecord extends AbstractKafkaSourceRecord> implements KVRecord { + final int keySize; + final int valueSize; + + final SourceRecord srcRecord; + KafkaSourceRecord(SourceRecord srcRecord) { super(srcRecord); + this.srcRecord = srcRecord; + byte[] keyBytes = keyConverter.fromConnectData( srcRecord.topic(), srcRecord.keySchema(), srcRecord.key()); + keySize = keyBytes != null ? keyBytes.length : 0; this.key = keyBytes != null ? Optional.of(Base64.getEncoder().encodeToString(keyBytes)) : Optional.empty(); byte[] valueBytes = valueConverter.fromConnectData( srcRecord.topic(), srcRecord.valueSchema(), srcRecord.value()); + valueSize = valueBytes != null ? valueBytes.length : 0; this.value = new KeyValue<>(keyBytes, valueBytes); @@ -145,6 +157,35 @@ public KeyValueEncodingType getKeyValueEncodingType() { } } + @Override + public void ack() { + // first try to commitRecord() for the current record in the batch + // then call super.ack() which calls commit() after complete batch of records is processed + try { + if (log.isDebugEnabled()) { + log.debug("commitRecord() for record: {}", srcRecord); + } + getSourceTask().commitRecord(srcRecord, + new RecordMetadata( + new TopicPartition(srcRecord.topic() == null + ? topicName.orElse("UNDEFINED") + : srcRecord.topic(), + srcRecord.kafkaPartition() == null ? 0 : srcRecord.kafkaPartition()), + -1L, // baseOffset == -1L means no offset + 0, // batchIndex, doesn't matter if baseOffset == -1L + null == srcRecord.timestamp() ? -1L : srcRecord.timestamp(), + keySize, // serializedKeySize + valueSize // serializedValueSize + )); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.error("Source task failed to commit record, " + + "source task should resend data, will get duplicate", e); + return; + } + super.ack(); + } + } } diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrRecFileStreamSourceTask.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrRecFileStreamSourceTask.java new file mode 100644 index 0000000000000..cbdd4c41bf692 --- /dev/null +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrRecFileStreamSourceTask.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.io.kafka.connect; + +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.connect.file.FileStreamSourceTask; +import org.apache.kafka.connect.source.SourceRecord; + +public class ErrRecFileStreamSourceTask extends FileStreamSourceTask { + + @Override + public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException { + throw new org.apache.kafka.connect.errors.ConnectException("blah"); + } + +} diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSinkTest.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSinkTest.java index 5410e0bb8d664..1100b13b425b4 100644 --- a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSinkTest.java +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSinkTest.java @@ -1572,7 +1572,7 @@ public void testGetMessageSequenceRefForBatchMessage() throws Exception { assertNull(ref); ref = KafkaConnectSink.getMessageSequenceRefForBatchMessage( - new TopicMessageIdImpl("topic-0", "topic", new MessageIdImpl(ledgerId, entryId, 0)) + new TopicMessageIdImpl("topic-0", new MessageIdImpl(ledgerId, entryId, 0)) ); assertNull(ref); @@ -1584,7 +1584,7 @@ public void testGetMessageSequenceRefForBatchMessage() throws Exception { assertEquals(ref.getBatchIdx(), batchIdx); ref = KafkaConnectSink.getMessageSequenceRefForBatchMessage( - new TopicMessageIdImpl("topic-0", "topic", new BatchMessageIdImpl(ledgerId, entryId, 0, batchIdx)) + new TopicMessageIdImpl("topic-0", new BatchMessageIdImpl(ledgerId, entryId, 0, batchIdx)) ); assertEquals(ref.getLedgerId(), ledgerId); diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrRecTest.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrRecTest.java new file mode 100644 index 0000000000000..9872e1fbc7e2f --- /dev/null +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrRecTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
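For context on the new KafkaSourceRecord.ack() override above, which forwards the Pulsar acknowledgement to SourceTask.commitRecord() with a synthetic RecordMetadata, the sketch below shows what a Kafka Connect task on the receiving end of that call typically looks like. The class name and the body of commitRecord() are illustrative assumptions, not part of this patch; they only restate the contract exercised by the ErrRecFileStreamSourceTask above and the KafkaConnectSourceErrRecTest that follows.

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.clients.producer.RecordMetadata;
    import org.apache.kafka.connect.source.SourceRecord;
    import org.apache.kafka.connect.source.SourceTask;

    // Hypothetical task illustrating the ack-to-commitRecord contract.
    public class AckAwareSourceTask extends SourceTask {

        @Override
        public String version() {
            return "0.0.1";
        }

        @Override
        public void start(Map<String, String> props) {
            // connect to the upstream system here
        }

        @Override
        public List<SourceRecord> poll() {
            // a real task returns records fetched from the upstream system
            return Collections.emptyList();
        }

        @Override
        public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException {
            // invoked from KafkaSourceRecord.ack() before the batch-level commit();
            // a real task would commit the upstream position carried in
            // record.sourceOffset() here
        }

        @Override
        public void stop() {
            // release upstream resources
        }
    }
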
+ */ +package org.apache.pulsar.io.kafka.connect; + +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.connect.file.FileStreamSourceConnector; +import org.apache.kafka.connect.runtime.TaskConfig; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.schema.KeyValue; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.io.core.SourceContext; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import java.io.File; +import java.io.OutputStream; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.Map; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +/** + * Test the implementation of {@link KafkaConnectSource}. + */ +@Slf4j +public class KafkaConnectSourceErrRecTest extends ProducerConsumerBase { + + private Map config = new HashMap<>(); + private String offsetTopicName; + // The topic to publish data to, for kafkaSource + private String topicName; + private KafkaConnectSource kafkaConnectSource; + private File tempFile; + private SourceContext context; + private PulsarClient client; + + @BeforeMethod + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + + config.put(TaskConfig.TASK_CLASS_CONFIG, "org.apache.pulsar.io.kafka.connect.ErrRecFileStreamSourceTask"); + config.put(PulsarKafkaWorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); + config.put(PulsarKafkaWorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); + + this.offsetTopicName = "persistent://my-property/my-ns/kafka-connect-source-offset"; + config.put(PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG, offsetTopicName); + + this.topicName = "persistent://my-property/my-ns/kafka-connect-source"; + config.put(FileStreamSourceConnector.TOPIC_CONFIG, topicName); + tempFile = File.createTempFile("some-file-name", null); + config.put(FileStreamSourceConnector.FILE_CONFIG, tempFile.getAbsoluteFile().toString()); + config.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, String.valueOf(FileStreamSourceConnector.DEFAULT_TASK_BATCH_SIZE)); + + this.context = mock(SourceContext.class); + this.client = PulsarClient.builder() + .serviceUrl(brokerUrl.toString()) + .build(); + when(context.getPulsarClient()).thenReturn(this.client); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + if (this.client != null) { + this.client.close(); + } + tempFile.delete(); + super.internalCleanup(); + } + + @Test + public void testCommitRecordCalled() throws Exception { + kafkaConnectSource = new KafkaConnectSource(); + kafkaConnectSource.open(config, context); + + // use FileStreamSourceConnector, each line is a record, need "\n" and end of each record. 
+ OutputStream os = Files.newOutputStream(tempFile.toPath()); + + String line1 = "This is the first line\n"; + os.write(line1.getBytes()); + os.flush(); + os.close(); + + Record> record = kafkaConnectSource.read(); + + assertTrue(record instanceof KafkaConnectSource.KafkaSourceRecord); + + try { + record.ack(); + fail("expected exception"); + } catch (Exception e) { + log.info("got exception", e); + assertTrue(e instanceof org.apache.kafka.connect.errors.ConnectException); + } + } +} diff --git a/pulsar-io/kafka/pom.xml b/pulsar-io/kafka/pom.xml index 9e322961889ed..70aff1a6fce77 100644 --- a/pulsar-io/kafka/pom.xml +++ b/pulsar-io/kafka/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-kafka Pulsar IO :: Kafka - @@ -44,73 +42,75 @@ - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-client-original ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.google.guava guava - org.apache.kafka kafka-clients ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - io.confluent kafka-schema-registry-client ${confluent.version} - io.confluent kafka-avro-serializer ${confluent.version} - io.jsonwebtoken jjwt-impl - io.jsonwebtoken jjwt-jackson - org.hamcrest hamcrest test - + + org.awaitility + awaitility + test + - diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java index 8fbd6c8186110..b1710d2c0b72d 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Map; -import java.util.Objects; import java.util.Properties; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -79,10 +78,7 @@ protected Properties beforeCreateProducer(Properties props) { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - kafkaSinkConfig = KafkaSinkConfig.load(config); - Objects.requireNonNull(kafkaSinkConfig.getTopic(), "Kafka topic is not set"); - Objects.requireNonNull(kafkaSinkConfig.getBootstrapServers(), "Kafka bootstrapServers is not set"); - Objects.requireNonNull(kafkaSinkConfig.getAcks(), "Kafka acks mode is not set"); + kafkaSinkConfig = KafkaSinkConfig.load(config, sinkContext); if (kafkaSinkConfig.getBatchSize() <= 0) { throw new IllegalArgumentException("Invalid Kafka Producer batchSize : " + kafkaSinkConfig.getBatchSize()); diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java index 565c36047474b..cd0db92bd78de 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java @@ -27,7 +27,6 @@ import java.util.Optional; import java.util.Properties; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -45,7 +44,6 @@ import org.apache.pulsar.common.schema.KeyValueEncodingType; import 
org.apache.pulsar.functions.api.KVRecord; import org.apache.pulsar.functions.api.Record; -import org.apache.pulsar.io.core.PushSource; import org.apache.pulsar.io.core.SourceContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,7 +51,7 @@ /** * Simple Kafka Source to transfer messages from a Kafka topic. */ -public abstract class KafkaAbstractSource extends PushSource { +public abstract class KafkaAbstractSource extends KafkaPushSource { public static final String HEADER_KAFKA_TOPIC_KEY = "__kafka_topic"; public static final String HEADER_KAFKA_PTN_KEY = "__kafka_partition"; public static final String HEADER_KAFKA_OFFSET_KEY = "__kafka_offset"; @@ -67,7 +65,7 @@ public abstract class KafkaAbstractSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - kafkaSourceConfig = KafkaSourceConfig.load(config); + kafkaSourceConfig = KafkaSourceConfig.load(config, sourceContext); Objects.requireNonNull(kafkaSourceConfig.getTopic(), "Kafka topic is not set"); Objects.requireNonNull(kafkaSourceConfig.getBootstrapServers(), "Kafka bootstrapServers is not set"); Objects.requireNonNull(kafkaSourceConfig.getGroupId(), "Kafka consumer group id is not set"); @@ -133,7 +131,6 @@ public void open(Map config, SourceContext sourceContext) throws throw new IllegalArgumentException("Unable to instantiate Kafka consumer", ex); } this.start(); - running = true; } protected Properties beforeCreateConsumer(Properties props) { @@ -158,38 +155,36 @@ public void close() throws InterruptedException { @SuppressWarnings("unchecked") public void start() { + LOG.info("Starting subscribe kafka source on {}", kafkaSourceConfig.getTopic()); + consumer.subscribe(Collections.singletonList(kafkaSourceConfig.getTopic())); runnerThread = new Thread(() -> { - LOG.info("Starting kafka source on {}", kafkaSourceConfig.getTopic()); - consumer.subscribe(Collections.singletonList(kafkaSourceConfig.getTopic())); LOG.info("Kafka source started."); while (running) { - ConsumerRecords consumerRecords = consumer.poll(Duration.ofSeconds(1L)); - CompletableFuture[] futures = new CompletableFuture[consumerRecords.count()]; - int index = 0; - for (ConsumerRecord consumerRecord : consumerRecords) { - KafkaRecord record = buildRecord(consumerRecord); - if (LOG.isDebugEnabled()) { - LOG.debug("Write record {} {} {}", record.getKey(), record.getValue(), record.getSchema()); + try { + ConsumerRecords consumerRecords = consumer.poll(Duration.ofSeconds(1L)); + CompletableFuture[] futures = new CompletableFuture[consumerRecords.count()]; + int index = 0; + for (ConsumerRecord consumerRecord : consumerRecords) { + KafkaRecord record = buildRecord(consumerRecord); + if (LOG.isDebugEnabled()) { + LOG.debug("Write record {} {} {}", record.getKey(), record.getValue(), record.getSchema()); + } + consume(record); + futures[index] = record.getCompletableFuture(); + index++; } - consume(record); - futures[index] = record.getCompletableFuture(); - index++; - } - if (!kafkaSourceConfig.isAutoCommitEnabled()) { - try { + if (!kafkaSourceConfig.isAutoCommitEnabled()) { CompletableFuture.allOf(futures).get(); consumer.commitSync(); - } catch (InterruptedException ex) { - break; - } catch (ExecutionException ex) { - LOG.error("Error while processing records", ex); - break; } + } catch (Exception e) { + LOG.error("Error while processing records", e); + notifyError(e); + break; } } }); - runnerThread.setUncaughtExceptionHandler( - (t, e) -> LOG.error("[{}] Error while consuming records", 
t.getName(), e)); + running = true; runnerThread.setName("Kafka Source Thread"); runnerThread.start(); } diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaPushSource.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaPushSource.java new file mode 100644 index 0000000000000..9319518572183 --- /dev/null +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaPushSource.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.io.kafka; + +import java.util.concurrent.LinkedBlockingQueue; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.io.core.Source; + +/** + * Kafka Push Source. + * To maintain compatibility, we can't pick the PIP-281: https://github.com/apache/pulsar/pull/20807 + * cherry-pick to the historical version, so the class is implemented in the kafka connector. + */ +public abstract class KafkaPushSource implements Source { + + private static class NullRecord implements Record { + @Override + public Object getValue() { + return null; + } + } + + private static class ErrorNotifierRecord implements Record { + private Exception e; + public ErrorNotifierRecord(Exception e) { + this.e = e; + } + @Override + public Object getValue() { + return null; + } + + public Exception getException() { + return e; + } + } + + private LinkedBlockingQueue> queue; + private static final int DEFAULT_QUEUE_LENGTH = 1000; + private final NullRecord nullRecord = new NullRecord(); + + public KafkaPushSource() { + this.queue = new LinkedBlockingQueue<>(this.getQueueLength()); + } + + @Override + public Record read() throws Exception { + Record record = queue.take(); + if (record instanceof ErrorNotifierRecord) { + throw ((ErrorNotifierRecord) record).getException(); + } + if (record instanceof NullRecord) { + return null; + } else { + return record; + } + } + + /** + * Send this message to be written to Pulsar. + * Pass null if you you are done with this task + * @param record next message from source which should be sent to a Pulsar topic + */ + public void consume(Record record) { + try { + if (record != null) { + queue.put(record); + } else { + queue.put(nullRecord); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + /** + * Get length of the queue that records are push onto. + * Users can override this method to customize the queue length + * @return queue length + */ + public int getQueueLength() { + return DEFAULT_QUEUE_LENGTH; + } + + /** + * Allows the source to notify errors asynchronously. 
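The KafkaPushSource introduced above decouples the Kafka poll loop from the framework's pull-based read(): a background thread hands records to consume(), and failures are surfaced through notifyError() so that the next read() call rethrows them. A minimal sketch of a subclass using that contract follows; the class name, the fetchNextValue() helper, and the thread handling are illustrative assumptions, not part of the connector.

    import java.util.Map;
    import org.apache.pulsar.io.core.SourceContext;

    // Hypothetical subclass sketching the KafkaPushSource contract: push records
    // with consume(), report failures with notifyError().
    public class SketchPushSource extends KafkaPushSource<String> {

        private volatile boolean running;

        @Override
        public void open(Map<String, Object> config, SourceContext sourceContext) {
            running = true;
            Thread runner = new Thread(() -> {
                try {
                    while (running) {
                        String value = fetchNextValue();
                        // Record has a single abstract method (getValue), so a lambda
                        // supplying only the value is enough for this sketch
                        consume(() -> value);
                    }
                } catch (Exception e) {
                    // surfaces to the framework on the next read() call
                    notifyError(e);
                }
            }, "sketch-push-source");
            runner.start();
        }

        @Override
        public void close() {
            running = false;
        }

        private String fetchNextValue() {
            return "hello"; // a real source would poll its upstream system here
        }
    }
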
+ * @param ex + */ + public void notifyError(Exception ex) { + consume(new ErrorNotifierRecord(ex)); + } +} diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java index dbbf1a7b5e2a0..4ea5fc897d651 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -91,12 +93,12 @@ public class KafkaSinkConfig implements Serializable { + " before considering a request complete. This controls the durability of records that are sent.") private String acks; @FieldDoc( - defaultValue = "16384L", + defaultValue = "16384", help = "The batch size that Kafka producer will attempt to batch records together" + " before sending them to brokers.") private long batchSize = 16384L; @FieldDoc( - defaultValue = "1048576L", + defaultValue = "1048576", help = "The maximum size of a Kafka request in bytes.") private long maxRequestSize = 1048576L; @@ -129,8 +131,7 @@ public static KafkaSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), KafkaSinkConfig.class); } - public static KafkaSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), KafkaSinkConfig.class); + public static KafkaSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, KafkaSinkConfig.class, sinkContext); } } \ No newline at end of file diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java index ad2e121d26abf..422659cdda408 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java @@ -27,6 +27,7 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -158,8 +159,14 @@ public static KafkaSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), KafkaSourceConfig.class); } - public static KafkaSourceConfig load(Map map) throws IOException { + public static KafkaSourceConfig load(Map map, SourceContext sourceContext) throws IOException { ObjectMapper mapper = new ObjectMapper(); + // since the KafkaSourceConfig requires the ACCEPT_EMPTY_STRING_AS_NULL_OBJECT feature + // We manually set the sensitive fields here instead of calling `IOConfigUtils.loadWithSecrets` + String sslTruststorePassword = sourceContext.getSecret("sslTruststorePassword"); + if (sslTruststorePassword != null) { + map.put("sslTruststorePassword", sslTruststorePassword); + } mapper.enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT); return mapper.readValue(mapper.writeValueAsString(map), KafkaSourceConfig.class); } diff --git a/pulsar-io/kafka/src/main/resources/findbugsExclude.xml b/pulsar-io/kafka/src/main/resources/findbugsExclude.xml index 3fc8f67c8640e..a48959506a69a 
100644 --- a/pulsar-io/kafka/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/kafka/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-kinesis Pulsar IO :: Kinesis - 2.2.8 0.14.0 @@ -37,116 +35,96 @@ 1.9.0 2.3.0 - ${project.groupId} pulsar-io-common ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-io-aws ${project.version} - - org.apache.pulsar + io.streamnative pulsar-functions-instance ${project.version} test - org.apache.commons commons-lang3 - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.dataformat jackson-dataformat-cbor - org.apache.avro avro ${avro.version} - com.github.wnameless.json json-flattener ${json-flattener.version} - com.google.code.gson gson - com.amazonaws aws-java-sdk-core - software.amazon.kinesis amazon-kinesis-client ${amazon-kinesis-client.version} - com.amazonaws amazon-kinesis-producer ${amazon-kinesis-producer.version} - - + com.google.flatbuffers flatbuffers-java ${flatbuffers-java.version} - javax.xml.bind jaxb-api ${jaxb-api.version} - org.testcontainers localstack test - org.awaitility awaitility test - - @@ -155,5 +133,4 @@ - diff --git a/pulsar-io/mongo/pom.xml b/pulsar-io/mongo/pom.xml index 63cdd397f2548..c0bfce38fc3c3 100644 --- a/pulsar-io/mongo/pom.xml +++ b/pulsar-io/mongo/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT - pulsar-io-mongo Pulsar IO :: MongoDB - 4.1.2 - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.parent.groupId} pulsar-io-core @@ -64,7 +64,6 @@ guava - diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java index 35c327ed82b99..74f077da62036 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java @@ -24,7 +24,6 @@ import java.io.Serializable; import lombok.Data; import lombok.experimental.Accessors; -import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -42,6 +41,7 @@ public abstract class MongoAbstractConfig implements Serializable { @FieldDoc( required = true, + sensitive = true, // it may contain password defaultValue = "", help = "The URI of MongoDB that the connector connects to " + "(see: https://docs.mongodb.com/manual/reference/connection-string/)" @@ -95,7 +95,6 @@ public MongoAbstractConfig( } public void validate() { - checkArgument(!StringUtils.isEmpty(getMongoUri()), "Required MongoDB URI is not set."); checkArgument(getBatchSize() > 0, "batchSize must be a positive integer."); checkArgument(getBatchTimeMs() > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java index 2206d232eaf97..61d5aeb697e01 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java @@ -86,7 +86,7 @@ public MongoSink(Supplier clientProvider) { public void open(Map config, SinkContext sinkContext) throws Exception { log.info("Open MongoDB Sink"); 
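The sensitive = true marker added to mongoUri above is what lets IOConfigUtils.loadWithSecrets() fall back to the connector context's secret store when the value is absent from the config map, which is exactly what the new "from secret" tests in this patch exercise. The snippet below restates that behavior as a standalone sketch; the concrete config values are illustrative only.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.pulsar.io.core.SinkContext;
    import org.apache.pulsar.io.mongodb.MongoSinkConfig;
    import org.mockito.Mockito;

    public class SecretFallbackSketch {
        public static void main(String[] args) throws Exception {
            Map<String, Object> config = new HashMap<>();
            config.put("database", "pulsar");      // illustrative values
            config.put("collection", "messages");
            // "mongoUri" is deliberately absent from the map

            SinkContext sinkContext = Mockito.mock(SinkContext.class);
            Mockito.when(sinkContext.getSecret("mongoUri"))
                    .thenReturn("mongodb://user:pass@localhost:27017");

            // load(map, sinkContext) delegates to IOConfigUtils.loadWithSecrets(),
            // which resolves sensitive fields such as mongoUri via getSecret()
            MongoSinkConfig cfg = MongoSinkConfig.load(config, sinkContext);
            cfg.validate();
        }
    }
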
- mongoSinkConfig = MongoSinkConfig.load(config); + mongoSinkConfig = MongoSinkConfig.load(config, sinkContext); mongoSinkConfig.validate(); if (clientProvider != null) { diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java index 285f3c97bef1a..9431fe4910800 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java @@ -30,6 +30,8 @@ import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; /** * Configuration class for the MongoDB Sink Connectors. @@ -59,11 +61,8 @@ public static MongoSinkConfig load(String yamlFile) throws IOException { return cfg; } - public static MongoSinkConfig load(Map map) throws IOException { - final ObjectMapper mapper = new ObjectMapper(); - final MongoSinkConfig cfg = mapper.readValue(mapper.writeValueAsString(map), MongoSinkConfig.class); - - return cfg; + public static MongoSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, MongoSinkConfig.class, sinkContext); } @Override diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java index 6ee95fc4cd4b5..68a31b461a51c 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java @@ -79,7 +79,7 @@ public MongoSource(Supplier clientProvider) { public void open(Map config, SourceContext sourceContext) throws Exception { log.info("Open MongoDB Source"); - mongoSourceConfig = MongoSourceConfig.load(config); + mongoSourceConfig = MongoSourceConfig.load(config, sourceContext); mongoSourceConfig.validate(); if (clientProvider != null) { diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java index cf887a93bf3c3..1c0c7f4b3657a 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java @@ -29,6 +29,8 @@ import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -75,12 +77,8 @@ public static MongoSourceConfig load(String yamlFile) throws IOException { return cfg; } - public static MongoSourceConfig load(Map map) throws IOException { - final ObjectMapper mapper = new ObjectMapper(); - final MongoSourceConfig cfg = - mapper.readValue(mapper.writeValueAsString(map), MongoSourceConfig.class); - - return cfg; + public static MongoSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, MongoSourceConfig.class, sourceContext); } /** diff --git a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java index 
b1166eac5722a..c86e45feb2348 100644 --- a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java +++ b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.io.mongodb; import java.util.Map; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -34,7 +36,27 @@ public void testLoadMapConfig() throws IOException { commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE); commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME); - final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext); + + assertEquals(cfg.getMongoUri(), TestHelper.URI); + assertEquals(cfg.getDatabase(), TestHelper.DB); + assertEquals(cfg.getCollection(), TestHelper.COLL); + assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE); + assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME); + } + + @Test + public void testLoadMapConfigUrlFromSecret() throws IOException { + final Map commonConfigMap = TestHelper.createCommonConfigMap(); + commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE); + commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME); + commonConfigMap.remove("mongoUri"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("mongoUri")) + .thenReturn(TestHelper.URI); + final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext); assertEquals(cfg.getMongoUri(), TestHelper.URI); assertEquals(cfg.getDatabase(), TestHelper.DB); @@ -44,12 +66,13 @@ public void testLoadMapConfig() throws IOException { } @Test(expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "Required MongoDB URI is not set.") + expectedExceptionsMessageRegExp = "mongoUri cannot be null") public void testBadMongoUri() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeMongoUri(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -60,7 +83,8 @@ public void testBadDatabase() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeDatabase(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -71,7 +95,8 @@ public void testBadCollection() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeCollection(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -82,7 +107,8 @@ public void testBadBatchSize() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchSize(configMap, 0); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -93,7 +119,8 @@ public void 
testBadBatchTime() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchTime(configMap, 0L); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } diff --git a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java index e7fd01549b033..528cd0237ef16 100644 --- a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java +++ b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java @@ -23,6 +23,8 @@ import java.io.File; import java.io.IOException; import java.util.Map; +import org.apache.pulsar.io.core.SourceContext; +import org.mockito.Mockito; import org.testng.annotations.Test; public class MongoSourceConfigTest { @@ -32,7 +34,27 @@ public void testLoadMapConfig() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putSyncType(configMap, TestHelper.SYNC_TYPE); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); + + assertEquals(cfg.getMongoUri(), TestHelper.URI); + assertEquals(cfg.getDatabase(), TestHelper.DB); + assertEquals(cfg.getCollection(), TestHelper.COLL); + assertEquals(cfg.getSyncType(), TestHelper.SYNC_TYPE); + assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE); + assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME); + } + + @Test + public void testLoadMapConfigUriFromSecret() throws IOException { + final Map configMap = TestHelper.createCommonConfigMap(); + TestHelper.putSyncType(configMap, TestHelper.SYNC_TYPE); + configMap.remove("mongoUri"); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("mongoUri")) + .thenReturn(TestHelper.URI); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); assertEquals(cfg.getMongoUri(), TestHelper.URI); assertEquals(cfg.getDatabase(), TestHelper.DB); @@ -43,12 +65,13 @@ public void testLoadMapConfig() throws IOException { } @Test(expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "Required MongoDB URI is not set.") + expectedExceptionsMessageRegExp = "mongoUri cannot be null") public void testBadMongoUri() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeMongoUri(configMap); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } @@ -61,7 +84,8 @@ public void testBadSyncType() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putSyncType(configMap, "wrong_sync_type_str"); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } @@ -72,7 +96,8 @@ public void testBadBatchSize() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchSize(configMap, 0); - final 
MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } @@ -83,7 +108,8 @@ public void testBadBatchTime() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchTime(configMap, 0L); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } diff --git a/pulsar-io/netty/pom.xml b/pulsar-io/netty/pom.xml index b6959199053e3..8b9d89fe739ba 100644 --- a/pulsar-io/netty/pom.xml +++ b/pulsar-io/netty/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.0.0-SNAPSHOT - - - pulsar-io-netty - Pulsar IO :: Netty - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - io.netty - netty-handler - - - - io.netty - netty-codec-http - - - - com.google.guava - guava - - - - org.apache.commons - commons-lang3 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.0.0-SNAPSHOT + + pulsar-io-netty + Pulsar IO :: Netty + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + io.netty + netty-handler + + + io.netty + netty-codec-http + + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-io/netty/src/main/resources/findbugsExclude.xml b/pulsar-io/netty/src/main/resources/findbugsExclude.xml index 3a52062fc9233..6b8906a775bd3 100644 --- a/pulsar-io/netty/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/netty/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - + + + + + + + + + + + + diff --git a/pulsar-io/nsq/pom.xml b/pulsar-io/nsq/pom.xml index 8f7307843627f..fccec986b8569 100644 --- a/pulsar-io/nsq/pom.xml +++ b/pulsar-io/nsq/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-nsq Pulsar IO :: NSQ - ${project.groupId} pulsar-io-core ${project.version} - com.sproutsocial nsq-j ${nsq-client.version} - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - ${project.groupId} pulsar-io-common ${project.version} - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - - - - - org.apache.nifi - nifi-nar-maven-plugin - - + + + org.apache.nifi + nifi-nar-maven-plugin + + diff --git a/pulsar-io/pom.xml b/pulsar-io/pom.xml index 2e62509c08651..0d39a02d508d8 100644 --- a/pulsar-io/pom.xml +++ b/pulsar-io/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-io Pulsar IO :: Parent - main @@ -77,7 +75,6 @@ alluxio - pulsar-io-tests @@ -85,22 +82,16 @@ batch-discovery-triggerers 
batch-data-generator common - docs aws twitter cassandra aerospike http - kafka rabbitmq kinesis hdfs3 jdbc data-generator - elastic-search - kafka-connect-adaptor - kafka-connect-adaptor-nar - debezium hdfs2 canal file @@ -116,7 +107,25 @@ alluxio - + + pulsar-io-elastic-tests + + core + common + elastic-search + + + + pulsar-io-kafka-connect-tests + + core + common + kafka + kafka-connect-adaptor + kafka-connect-adaptor-nar + debezium + + core-modules @@ -130,7 +139,6 @@ - @@ -148,5 +156,4 @@ - diff --git a/pulsar-io/rabbitmq/pom.xml b/pulsar-io/rabbitmq/pom.xml index 8fc0535c16089..43888fee0dc4f 100644 --- a/pulsar-io/rabbitmq/pom.xml +++ b/pulsar-io/rabbitmq/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-rabbitmq Pulsar IO :: RabbitMQ - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core @@ -53,28 +55,23 @@ pulsar-client-original ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.google.guava guava - com.rabbitmq amqp-client ${rabbitmq-client.version} - org.apache.qpid qpid-broker @@ -86,9 +83,7 @@ awaitility test - - @@ -97,5 +92,4 @@ - diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java index f317a35734e69..89192c42346e8 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java @@ -53,7 +53,7 @@ public class RabbitMQSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - rabbitMQSinkConfig = RabbitMQSinkConfig.load(config); + rabbitMQSinkConfig = RabbitMQSinkConfig.load(config, sinkContext); rabbitMQSinkConfig.validate(); ConnectionFactory connectionFactory = rabbitMQSinkConfig.createConnectionFactory(); diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java index c1f8d6b8ad3d3..39f97e5e460c8 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java @@ -20,7 +20,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; -import com.google.common.base.Preconditions; import java.io.File; import java.io.IOException; import java.io.Serializable; @@ -28,6 +27,8 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -60,14 +61,12 @@ public static RabbitMQSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RabbitMQSinkConfig.class); } - public static RabbitMQSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RabbitMQSinkConfig.class); + public static RabbitMQSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RabbitMQSinkConfig.class, sinkContext); } @Override public void validate() { 
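With RabbitMQSinkConfig.load(map, sinkContext) above now delegating to IOConfigUtils.loadWithSecrets(), enforcement of required fields moves out of explicit Preconditions checks and into the loader itself, which is why the updated tests further below expect IllegalArgumentException with messages like "exchangeName cannot be null". A sketch of that failure mode follows, using an abbreviated config map with illustrative values; it is not part of the patch.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.pulsar.io.core.SinkContext;
    import org.apache.pulsar.io.rabbitmq.RabbitMQSinkConfig;
    import org.mockito.Mockito;

    public class MissingRequiredFieldSketch {
        public static void main(String[] args) throws Exception {
            Map<String, Object> map = new HashMap<>();
            map.put("host", "localhost");
            map.put("port", "5673");
            map.put("virtualHost", "/");
            map.put("exchangeType", "fanout");
            // the remaining connection settings used in the connector's own test are
            // omitted for brevity; the point is that "exchangeName" is missing

            SinkContext sinkContext = Mockito.mock(SinkContext.class);
            try {
                RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext);
                config.validate();
            } catch (IllegalArgumentException expected) {
                // the required exchangeName field is absent; the updated test expects
                // the message "exchangeName cannot be null"
            }
        }
    }
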
super.validate(); - Preconditions.checkNotNull(exchangeName, "exchangeName property not set."); } } diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java index d15108c4d8288..b0b7ef31b08de 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java @@ -54,7 +54,7 @@ public class RabbitMQSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - rabbitMQSourceConfig = RabbitMQSourceConfig.load(config); + rabbitMQSourceConfig = RabbitMQSourceConfig.load(config, sourceContext); rabbitMQSourceConfig.validate(); ConnectionFactory connectionFactory = rabbitMQSourceConfig.createConnectionFactory(); diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java index f24018e70da13..01e23a7146080 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java @@ -28,6 +28,8 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -66,9 +68,8 @@ public static RabbitMQSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RabbitMQSourceConfig.class); } - public static RabbitMQSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RabbitMQSourceConfig.class); + public static RabbitMQSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RabbitMQSourceConfig.class, sourceContext); } @Override diff --git a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java index 3d4fd6f46e16f..8706cb567524f 100644 --- a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java +++ b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.rabbitmq.sink; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.rabbitmq.RabbitMQSinkConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -71,7 +73,45 @@ public final void loadFromMapTest() throws IOException { map.put("exchangeName", "test-exchange"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getHost(), "localhost"); + assertEquals(config.getPort(), Integer.parseInt("5673")); + assertEquals(config.getVirtualHost(), "/"); + assertEquals(config.getUsername(), "guest"); + assertEquals(config.getPassword(), "guest"); + 
assertEquals(config.getConnectionName(), "test-connection"); + assertEquals(config.getRequestedChannelMax(), Integer.parseInt("0")); + assertEquals(config.getRequestedFrameMax(), Integer.parseInt("0")); + assertEquals(config.getConnectionTimeout(), Integer.parseInt("60000")); + assertEquals(config.getHandshakeTimeout(), Integer.parseInt("10000")); + assertEquals(config.getRequestedHeartbeat(), Integer.parseInt("60")); + assertEquals(config.getExchangeName(), "test-exchange"); + assertEquals(config.getExchangeType(), "test-exchange-type"); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("host", "localhost"); + map.put("port", "5673"); + map.put("virtualHost", "/"); + map.put("connectionName", "test-connection"); + map.put("requestedChannelMax", "0"); + map.put("requestedFrameMax", "0"); + map.put("connectionTimeout", "60000"); + map.put("handshakeTimeout", "10000"); + map.put("requestedHeartbeat", "60"); + map.put("exchangeName", "test-exchange"); + map.put("exchangeType", "test-exchange-type"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("username")) + .thenReturn("guest"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("guest"); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getHost(), "localhost"); assertEquals(config.getPort(), Integer.parseInt("5673")); @@ -105,12 +145,13 @@ public final void validValidateTest() throws IOException { map.put("exchangeName", "test-exchange"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "exchangeName property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "exchangeName cannot be null") public final void missingExchangeValidateTest() throws IOException { Map map = new HashMap<>(); map.put("host", "localhost"); @@ -126,7 +167,8 @@ public final void missingExchangeValidateTest() throws IOException { map.put("requestedHeartbeat", "60"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); config.validate(); } diff --git a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java index c33e0070c6fd0..43a90062fa453 100644 --- a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java +++ b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.rabbitmq.source; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.rabbitmq.RabbitMQSourceConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -76,7 +78,50 @@ public final void loadFromMapTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "true"); - RabbitMQSourceConfig 
config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); + assertNotNull(config); + assertEquals("localhost", config.getHost()); + assertEquals(Integer.parseInt("5672"), config.getPort()); + assertEquals("/", config.getVirtualHost()); + assertEquals("guest", config.getUsername()); + assertEquals("guest", config.getPassword()); + assertEquals("test-queue", config.getQueueName()); + assertEquals("test-connection", config.getConnectionName()); + assertEquals(Integer.parseInt("0"), config.getRequestedChannelMax()); + assertEquals(Integer.parseInt("0"), config.getRequestedFrameMax()); + assertEquals(Integer.parseInt("60000"), config.getConnectionTimeout()); + assertEquals(Integer.parseInt("10000"), config.getHandshakeTimeout()); + assertEquals(Integer.parseInt("60"), config.getRequestedHeartbeat()); + assertEquals(Integer.parseInt("0"), config.getPrefetchCount()); + assertEquals(false, config.isPrefetchGlobal()); + assertEquals(false, config.isPrefetchGlobal()); + assertEquals(true, config.isPassive()); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("host", "localhost"); + map.put("port", "5672"); + map.put("virtualHost", "/"); + map.put("queueName", "test-queue"); + map.put("connectionName", "test-connection"); + map.put("requestedChannelMax", "0"); + map.put("requestedFrameMax", "0"); + map.put("connectionTimeout", "60000"); + map.put("handshakeTimeout", "10000"); + map.put("requestedHeartbeat", "60"); + map.put("prefetchCount", "0"); + map.put("prefetchGlobal", "false"); + map.put("passive", "true"); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("username")) + .thenReturn("guest"); + Mockito.when(sourceContext.getSecret("password")) + .thenReturn("guest"); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); assertNotNull(config); assertEquals("localhost", config.getHost()); assertEquals(Integer.parseInt("5672"), config.getPort()); @@ -115,12 +160,13 @@ public final void validValidateTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "host property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "host cannot be null") public final void missingHostValidateTest() throws IOException { Map map = new HashMap<>(); map.put("port", "5672"); @@ -138,7 +184,8 @@ public final void missingHostValidateTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } @@ -162,7 +209,8 @@ public final void invalidPrefetchCountTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = 
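// Illustrative sketch (not part of the patch): the tests above and below repeat the same
// Mockito.when(context.getSecret(...)) stanzas; a small hypothetical helper keeps that in one place.
import java.util.Map;
import org.apache.pulsar.io.core.SinkContext;
import org.apache.pulsar.io.core.SourceContext;
import org.mockito.Mockito;

public final class SecretContextMocks {

    private SecretContextMocks() {}

    public static SourceContext sourceContextWithSecrets(Map<String, String> secrets) {
        SourceContext ctx = Mockito.mock(SourceContext.class);
        secrets.forEach((name, value) -> Mockito.when(ctx.getSecret(name)).thenReturn(value));
        return ctx;
    }

    public static SinkContext sinkContextWithSecrets(Map<String, String> secrets) {
        SinkContext ctx = Mockito.mock(SinkContext.class);
        secrets.forEach((name, value) -> Mockito.when(ctx.getSecret(name)).thenReturn(value));
        return ctx;
    }
}
// Usage sketch:
//   RabbitMQSourceConfig.load(map,
//       SecretContextMocks.sourceContextWithSecrets(Map.of("username", "guest", "password", "guest")));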
Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } diff --git a/pulsar-io/redis/pom.xml b/pulsar-io/redis/pom.xml index 3f04f6645697f..69daa0aed3f28 100644 --- a/pulsar-io/redis/pom.xml +++ b/pulsar-io/redis/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.0.0-SNAPSHOT - - - pulsar-io-redis - Pulsar IO :: Redis - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - io.lettuce - lettuce-core - 5.0.2.RELEASE - - - com.google.guava - guava - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - com.github.kstyrc - embedded-redis - 0.6 - test - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.0.0-SNAPSHOT + + pulsar-io-redis + Pulsar IO :: Redis + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + io.lettuce + lettuce-core + 5.0.2.RELEASE + + + com.google.guava + guava + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + com.github.kstyrc + embedded-redis + 0.6 + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java index 978e7de31a51c..89ec684dded72 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java @@ -88,13 +88,11 @@ public class RedisAbstractConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "10000L", + defaultValue = "10000", help = "The amount of time in milliseconds to wait before timing out when connecting") private long connectTimeout = 10000L; public void validate() { - Preconditions.checkNotNull(redisHosts, "redisHosts property not set."); - Preconditions.checkNotNull(redisDatabase, "redisDatabase property not set."); Preconditions.checkNotNull(clientMode, "clientMode property not set."); } @@ -105,7 +103,6 @@ public enum ClientMode { public List getHostAndPorts() { List hostAndPorts = Lists.newArrayList(); - Preconditions.checkNotNull(redisHosts, "redisHosts property not set."); String[] hosts = StringUtils.split(redisHosts, ","); for (String host : hosts) { HostAndPort hostAndPort = HostAndPort.fromString(host); diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java index bff0a5c2da592..ebd6e9dbab272 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java @@ -68,7 +68,7 @@ public class RedisSink implements Sink { public void 
open(Map config, SinkContext sinkContext) throws Exception { log.info("Open Redis Sink"); - redisSinkConfig = RedisSinkConfig.load(config); + redisSinkConfig = RedisSinkConfig.load(config, sinkContext); redisSinkConfig.validate(); redisSession = RedisSession.create(redisSinkConfig); diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java index a9db66812a475..f7a70cb65a826 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java @@ -28,6 +28,8 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; import org.apache.pulsar.io.redis.RedisAbstractConfig; @@ -40,13 +42,13 @@ public class RedisSinkConfig extends RedisAbstractConfig implements Serializable @FieldDoc( required = false, - defaultValue = "10000L", + defaultValue = "10000", help = "The amount of time in milliseconds before an operation is marked as timed out") private long operationTimeout = 10000L; @FieldDoc( required = false, - defaultValue = "1000L", + defaultValue = "1000", help = "The Redis operation time in milliseconds") private long batchTimeMs = 1000L; @@ -62,9 +64,8 @@ public static RedisSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RedisSinkConfig.class); } - public static RedisSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RedisSinkConfig.class); + public static RedisSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RedisSinkConfig.class, sinkContext); } @Override diff --git a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java index 1316d0994a1cd..39fc6e540c242 100644 --- a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java +++ b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.redis.sink; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.redis.RedisAbstractConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -62,7 +64,34 @@ public final void loadFromMapTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getRedisHosts(), "localhost:6379"); + assertEquals(config.getRedisPassword(), "fake@123"); + assertEquals(config.getRedisDatabase(), Integer.parseInt("1")); + assertEquals(config.getClientMode(), "Standalone"); + assertEquals(config.getOperationTimeout(), Long.parseLong("2000")); + assertEquals(config.getBatchSize(), Integer.parseInt("100")); + assertEquals(config.getBatchTimeMs(), Long.parseLong("1000")); + assertEquals(config.getConnectTimeout(), Long.parseLong("3000")); + } + + @Test + 
public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap(); + map.put("redisHosts", "localhost:6379"); + map.put("redisDatabase", "1"); + map.put("clientMode", "Standalone"); + map.put("operationTimeout", "2000"); + map.put("batchSize", "100"); + map.put("batchTimeMs", "1000"); + map.put("connectTimeout", "3000"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("redisPassword")) + .thenReturn("fake@123"); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getRedisHosts(), "localhost:6379"); assertEquals(config.getRedisPassword(), "fake@123"); @@ -86,12 +115,13 @@ public final void validValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "redisHosts property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "redisHosts cannot be null") public final void missingValidValidateTableNameTest() throws IOException { Map map = new HashMap(); map.put("redisPassword", "fake@123"); @@ -102,7 +132,8 @@ public final void missingValidValidateTableNameTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } @@ -119,7 +150,8 @@ public final void invalidBatchTimeMsTest() throws IOException { map.put("batchTimeMs", "-100"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } @@ -136,7 +168,8 @@ public final void invalidClientModeTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); RedisAbstractConfig.ClientMode.valueOf(config.getClientMode().toUpperCase()); diff --git a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java index 214151345b42c..2b407fafa5e04 100644 --- a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java +++ b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java @@ -21,7 +21,9 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.instance.SinkRecord; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.redis.EmbeddedRedisUtils; +import org.mockito.Mockito; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -66,7 +68,8 @@ public void TestOpenAndWriteSink() throws Exception { Record record = build("fakeTopic", "fakeKey", "fakeValue"); // open should success - 
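// Note (illustrative, not part of the patch): because RedisSink.open() now calls
// RedisSinkConfig.load(config, sinkContext), the context is used while resolving sensitive fields,
// so tests can no longer pass null to open(); a plain Mockito mock is sufficient when no secrets
// need to be stubbed, as the RedisSinkTest change below shows.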
sink.open(configs, null); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + sink.open(configs, sinkContext); // write should success. sink.write(record); diff --git a/pulsar-io/solr/pom.xml b/pulsar-io/solr/pom.xml index 4ef67a31ec33c..6f68e52bf21d3 100644 --- a/pulsar-io/solr/pom.xml +++ b/pulsar-io/solr/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.0.0-SNAPSHOT - - - - 8.11.1 - - - pulsar-io-solr - Pulsar IO :: Solr - - - - ${project.parent.groupId} - pulsar-io-core - ${project.parent.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - org.apache.solr - solr-solrj - ${solr.version} - - - org.apache.solr - solr-core - ${solr.version} - test - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.0.0-SNAPSHOT + + + 8.11.3 + + pulsar-io-solr + Pulsar IO :: Solr + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.parent.groupId} + pulsar-io-core + ${project.parent.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + org.apache.solr + solr-solrj + ${solr.version} + + + org.apache.solr + solr-core + ${solr.version} + + + jose4j + org.bitbucket.b_c + + + test + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + + diff --git a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java index de9cdb4a9d82a..202c782c14c49 100644 --- a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java +++ b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java @@ -48,7 +48,7 @@ public abstract class SolrAbstractSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - solrSinkConfig = SolrSinkConfig.load(config); + solrSinkConfig = SolrSinkConfig.load(config, sinkContext); solrSinkConfig.validate(); enableBasicAuth = !Strings.isNullOrEmpty(solrSinkConfig.getUsername()); diff --git a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java index 02733d230bdcb..daa93a366b110 100644 --- a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java +++ b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -84,9 +86,8 @@ public static SolrSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), SolrSinkConfig.class); } - public static SolrSinkConfig load(Map map) throws 
IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), SolrSinkConfig.class); + public static SolrSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, SolrSinkConfig.class, sinkContext); } public void validate() { diff --git a/pulsar-io/solr/src/main/resources/findbugsExclude.xml b/pulsar-io/solr/src/main/resources/findbugsExclude.xml index f49ff86c90caf..47c3d73af5f06 100644 --- a/pulsar-io/solr/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/solr/src/main/resources/findbugsExclude.xml @@ -1,22 +1,23 @@ - - - \ No newline at end of file + + + + diff --git a/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java b/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java index 42d2121dbfcbd..2c2447a637d35 100644 --- a/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java +++ b/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.io.solr; import com.google.common.collect.Lists; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -61,7 +63,31 @@ public final void loadFromMapTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot"); + assertEquals(config.getSolrMode(), "SolrCloud"); + assertEquals(config.getSolrCollection(), "techproducts"); + assertEquals(config.getSolrCommitWithinMs(), Integer.parseInt("100")); + assertEquals(config.getUsername(), "fakeuser"); + assertEquals(config.getPassword(), "fake@123"); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("solrUrl", "localhost:2181,localhost:2182/chroot"); + map.put("solrMode", "SolrCloud"); + map.put("solrCollection", "techproducts"); + map.put("solrCommitWithinMs", "100"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("username")) + .thenReturn("fakeuser"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("fake@123"); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot"); assertEquals(config.getSolrMode(), "SolrCloud"); @@ -81,12 +107,13 @@ public final void validValidateTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "solrUrl property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "solrUrl cannot be null") public final void missingValidValidateSolrModeTest() throws IOException { Map map = new HashMap<>(); map.put("solrMode", "SolrCloud"); @@ -95,7 +122,8 @@ public final void missingValidValidateSolrModeTest() throws IOException { 
map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } @@ -110,7 +138,8 @@ public final void invalidBatchTimeMsTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } @@ -125,7 +154,8 @@ public final void invalidClientModeTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); SolrAbstractSink.SolrMode.valueOf(config.getSolrMode().toUpperCase()); @@ -141,7 +171,8 @@ public final void validZkChrootTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); String url = config.getSolrUrl(); diff --git a/pulsar-io/solr/src/test/resources/solr.xml b/pulsar-io/solr/src/test/resources/solr.xml index 495a71f72f3f2..9ce4d37f4beef 100644 --- a/pulsar-io/solr/src/test/resources/solr.xml +++ b/pulsar-io/solr/src/test/resources/solr.xml @@ -1,3 +1,4 @@ + - - ${host:} - ${jetty.port:8983} - ${hostContext:solr} - ${genericCoreNodeNames:true} - ${zkClientTimeout:30000} - ${distribUpdateSoTimeout:600000} - ${distribUpdateConnTimeout:60000} - ${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider} - ${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider} - - - ${socketTimeout:600000} - ${connTimeout:60000} - - \ No newline at end of file + + ${host:} + ${jetty.port:8983} + ${hostContext:solr} + ${genericCoreNodeNames:true} + ${zkClientTimeout:30000} + ${distribUpdateSoTimeout:600000} + ${distribUpdateConnTimeout:60000} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider} + ${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider} + + + ${socketTimeout:600000} + ${connTimeout:60000} + + diff --git a/pulsar-io/twitter/pom.xml b/pulsar-io/twitter/pom.xml index 901f8639a4326..1314bf881e79d 100644 --- a/pulsar-io/twitter/pom.xml +++ b/pulsar-io/twitter/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.0.0-SNAPSHOT - pulsar-io-twitter Pulsar IO :: Twitter - - ${project.groupId} pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.twitter hbc-core ${hbc-core.version} - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - - ${project.groupId} - pulsar-io-common - ${project.version} + ${project.groupId} + pulsar-io-common + ${project.version} - - @@ -80,5 +69,4 @@ - diff --git a/pulsar-metadata/pom.xml b/pulsar-metadata/pom.xml index 4096a1ee0f994..cc0f22a2db64e 100644 --- a/pulsar-metadata/pom.xml +++ b/pulsar-metadata/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT .. 
- pulsar-metadata Pulsar Metadata - org.apache.pulsar + io.streamnative pulsar-common ${project.version} - org.apache.zookeeper zookeeper @@ -56,56 +53,69 @@ - - io.dropwizard.metrics metrics-core test - - + + org.apache.zookeeper + zookeeper + tests + test + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + + io.netty + netty-tcnative + + + + + ${project.groupId} + testmocks + ${project.version} + test + org.xerial.snappy snappy-java test - org.awaitility awaitility test - org.apache.bookkeeper bookkeeper-server - io.etcd jetcd-core - - io.etcd jetcd-test test - com.github.ben-manes.caffeine caffeine - io.prometheus simpleclient - - @@ -125,7 +135,6 @@ - org.apache.maven.plugins maven-jar-plugin diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java index 94da382b74dcf..6d558e709716d 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java @@ -148,6 +148,11 @@ public interface MetadataCache { */ void invalidate(String path); + /** + * Force the invalidation of all object in the metadata cache. + */ + void invalidateAll(); + /** * Invalidate and reload an object in the metadata cache. * diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java index a33c8761cba9e..4db7f4798c309 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java @@ -206,10 +206,11 @@ protected void asyncProcessLedgersInSingleNode( mcb = new BookkeeperInternalCallbacks.MultiCallback(activeLedgers.size(), finalCb, ctx, successRc, failureRc); // start loop over all ledgers - for (Long ledger : activeLedgers) { - processor.process(ledger, mcb); - } - + scheduler.submit(() -> { + for (Long ledger : activeLedgers) { + processor.process(ledger, mcb); + } + }); }).exceptionally(ex -> { finalCb.processResult(failureRc, null, ctx); return null; diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java index a4336b876398a..ee06930b3c880 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java @@ -30,7 +30,7 @@ import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; -class PulsarLayoutManager implements LayoutManager { +public class PulsarLayoutManager implements LayoutManager { @Getter(AccessLevel.PACKAGE) private final MetadataStoreExtended store; @@ -40,7 +40,7 @@ class PulsarLayoutManager implements LayoutManager { private final String layoutPath; - PulsarLayoutManager(MetadataStoreExtended store, String ledgersRootPath) { + public PulsarLayoutManager(MetadataStoreExtended store, String ledgersRootPath) { this.ledgersRootPath = ledgersRootPath; this.store = store; this.layoutPath = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; diff --git 
a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java index bc35380fec19c..44870ed47f05b 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java @@ -27,17 +27,19 @@ import org.apache.pulsar.metadata.api.coordination.LeaderElection; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.api.extended.SessionEvent; import org.apache.pulsar.metadata.coordination.impl.CoordinationServiceImpl; @Slf4j -class PulsarLedgerAuditorManager implements LedgerAuditorManager { +public class PulsarLedgerAuditorManager implements LedgerAuditorManager { - private static final String ELECTION_PATH = "leader"; + public static final String ELECTION_PATH = "leader"; private final CoordinationService coordinationService; private final LeaderElection leaderElection; private LeaderElectionState leaderElectionState; private String bookieId; + private boolean sessionExpired = false; PulsarLedgerAuditorManager(MetadataStoreExtended store, String ledgersRoot) { this.coordinationService = new CoordinationServiceImpl(store); @@ -47,6 +49,14 @@ class PulsarLedgerAuditorManager implements LedgerAuditorManager { this.leaderElection = coordinationService.getLeaderElection(String.class, electionPath, this::handleStateChanges); this.leaderElectionState = LeaderElectionState.NoLeader; + store.registerSessionListener(event -> { + if (SessionEvent.SessionLost == event) { + synchronized (this) { + sessionExpired = true; + notifyAll(); + } + } + }); } private void handleStateChanges(LeaderElectionState state) { @@ -71,6 +81,9 @@ public void tryToBecomeAuditor(String bookieId, Consumer listener) while (true) { try { synchronized (this) { + if (sessionExpired) { + throw new IllegalStateException("Zookeeper session expired, give up to become auditor."); + } if (leaderElectionState == LeaderElectionState.Leading) { return; } else { diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java index 59452a3d54db3..b003c656353c0 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java @@ -377,7 +377,7 @@ public void close() throws IOException { } } - private String getLedgerPath(long ledgerId) { + public String getLedgerPath(long ledgerId) { return this.ledgerRootPath + StringUtils.getHybridHierarchicalLedgerPath(ledgerId); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java index cf8ff208c94b6..343c3165ec7e9 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java @@ -25,10 +25,12 @@ import static org.apache.bookkeeper.proto.DataFormats.PlacementPolicyCheckFormat; import 
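// Illustrative sketch (not part of the patch): the PulsarLedgerAuditorManager change above registers
// a session listener so a thread blocked in tryToBecomeAuditor() can give up once the ZooKeeper
// session is lost instead of waiting forever. The same wait/notify pattern in isolation, with a
// hypothetical class; in the real manager the loop also exits once leadership is acquired.
import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended;
import org.apache.pulsar.metadata.api.extended.SessionEvent;

class SessionAwareWaiter {
    private boolean sessionExpired = false;

    SessionAwareWaiter(MetadataStoreExtended store) {
        store.registerSessionListener(event -> {
            if (SessionEvent.SessionLost == event) {
                synchronized (this) {
                    sessionExpired = true;
                    notifyAll();   // wake up any thread blocked in awaitLeadership()
                }
            }
        });
    }

    synchronized void awaitLeadership() throws InterruptedException {
        while (!sessionExpired) {
            wait();   // real code also leaves the loop when the leader election succeeds
        }
        throw new IllegalStateException("Metadata session lost, giving up");
    }
}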
static org.apache.bookkeeper.proto.DataFormats.ReplicasCheckFormat; import static org.apache.bookkeeper.proto.DataFormats.UnderreplicatedLedgerFormat; +import com.google.common.base.Joiner; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.TextFormat; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -61,6 +63,8 @@ import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.CreateOption; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.KeeperException; @Slf4j public class PulsarLedgerUnderreplicationManager implements LedgerUnderreplicationManager { @@ -98,14 +102,17 @@ long getLedgerNodeVersion() { private final String urLockPath; private final String layoutPath; private final String lostBookieRecoveryDelayPath; + private final String replicationDisablePath; private final String checkAllLedgersCtimePath; private final String placementPolicyCheckCtimePath; private final String replicasCheckCtimePath; private final MetadataStoreExtended store; - private BookkeeperInternalCallbacks.GenericCallback replicationEnabledListener; - private BookkeeperInternalCallbacks.GenericCallback lostBookieRecoveryDelayListener; + private final List> replicationEnabledCallbacks = + new ArrayList<>(); + private final List> lostBookieRecoveryDelayCallbacks = + new ArrayList<>(); private static class PulsarUnderreplicatedLedger extends UnderreplicatedLedger { PulsarUnderreplicatedLedger(long ledgerId) { @@ -132,6 +139,7 @@ public PulsarLedgerUnderreplicationManager(AbstractConfiguration conf, Metada urLedgerPath = basePath + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; urLockPath = basePath + '/' + BookKeeperConstants.UNDER_REPLICATION_LOCK; lostBookieRecoveryDelayPath = basePath + '/' + BookKeeperConstants.LOSTBOOKIERECOVERYDELAY_NODE; + replicationDisablePath = basePath + '/' + BookKeeperConstants.DISABLE_NODE; checkAllLedgersCtimePath = basePath + '/' + BookKeeperConstants.CHECK_ALL_LEDGERS_CTIME; placementPolicyCheckCtimePath = basePath + '/' + BookKeeperConstants.PLACEMENT_POLICY_CHECK_CTIME; replicasCheckCtimePath = basePath + '/' + BookKeeperConstants.REPLICAS_CHECK_CTIME; @@ -225,17 +233,34 @@ private void handleNotification(Notification n) { synchronized (this) { // Notify that there were some changes on the under-replicated z-nodes notifyAll(); - - if (n.getType() == NotificationType.Deleted) { - if (n.getPath().equals(basePath + '/' + BookKeeperConstants.DISABLE_NODE)) { - log.info("LedgerReplication is enabled externally through MetadataStore, " - + "since DISABLE_NODE ZNode is deleted"); - if (replicationEnabledListener != null) { - replicationEnabledListener.operationComplete(0, null); + if (lostBookieRecoveryDelayPath.equals(n.getPath())) { + final List> callbackList; + synchronized (lostBookieRecoveryDelayCallbacks) { + callbackList = new ArrayList<>(lostBookieRecoveryDelayCallbacks); + lostBookieRecoveryDelayCallbacks.clear(); + } + for (BookkeeperInternalCallbacks.GenericCallback callback : callbackList) { + try { + callback.operationComplete(0, null); + } catch (Exception e) { + log.warn("lostBookieRecoveryDelayCallbacks handle error", e); } - } else if (n.getPath().equals(lostBookieRecoveryDelayPath)) { - if (lostBookieRecoveryDelayListener != null) { - 
lostBookieRecoveryDelayListener.operationComplete(0, null); + } + return; + } + if (replicationDisablePath.equals(n.getPath()) && n.getType() == NotificationType.Deleted) { + log.info("LedgerReplication is enabled externally through MetadataStore, " + + "since DISABLE_NODE ZNode is deleted"); + final List> callbackList; + synchronized (replicationEnabledCallbacks) { + callbackList = new ArrayList<>(replicationEnabledCallbacks); + replicationEnabledCallbacks.clear(); + } + for (BookkeeperInternalCallbacks.GenericCallback callback : callbackList) { + try { + callback.operationComplete(0, null); + } catch (Exception e) { + log.warn("replicationEnabledCallbacks handle error", e); } } } @@ -393,6 +418,31 @@ public void markLedgerReplicated(long ledgerId) throws ReplicationException.Unav Lock l = heldLocks.get(ledgerId); if (l != null) { store.delete(getUrLedgerPath(ledgerId), Optional.of(l.getLedgerNodeVersion())).get(); + if (store instanceof ZKMetadataStore) { + try { + // clean up the hierarchy + String[] parts = getUrLedgerPath(ledgerId).split("/"); + for (int i = 1; i <= 4; i++) { + String[] p = Arrays.copyOf(parts, parts.length - i); + String path = Joiner.on("/").join(p); + Optional getResult = store.get(path).get(); + if (getResult.isPresent()) { + store.delete(path, Optional.of(getResult.get().getStat().getVersion())).get(); + } + } + } catch (ExecutionException ee) { + // This can happen when cleaning up the hierarchy. + // It's safe to ignore, it simply means another + // ledger in the same hierarchy has been marked as + // underreplicated. + if (ee.getCause() instanceof MetadataStoreException && ee.getCause().getCause() + instanceof KeeperException.NotEmptyException) { + //do nothing. + } else { + log.warn("Error deleting underreplicated ledger parent node", ee); + } + } + } } } catch (ExecutionException ee) { if (ee.getCause() instanceof MetadataStoreException.NotFoundException) { @@ -642,8 +692,7 @@ public void disableLedgerReplication() log.debug("disableLedegerReplication()"); } try { - String path = basePath + '/' + BookKeeperConstants.DISABLE_NODE; - store.put(path, "".getBytes(UTF_8), Optional.of(-1L)).get(); + store.put(replicationDisablePath, "".getBytes(UTF_8), Optional.of(-1L)).get(); log.info("Auto ledger re-replication is disabled!"); } catch (ExecutionException ee) { log.error("Exception while stopping auto ledger re-replication", ee); @@ -663,7 +712,7 @@ public void enableLedgerReplication() log.debug("enableLedegerReplication()"); } try { - store.delete(basePath + '/' + BookKeeperConstants.DISABLE_NODE, Optional.empty()).get(); + store.delete(replicationDisablePath, Optional.empty()).get(); log.info("Resuming automatic ledger re-replication"); } catch (ExecutionException ee) { log.error("Exception while resuming ledger replication", ee); @@ -683,7 +732,7 @@ public boolean isLedgerReplicationEnabled() log.debug("isLedgerReplicationEnabled()"); } try { - return !store.exists(basePath + '/' + BookKeeperConstants.DISABLE_NODE).get(); + return !store.exists(replicationDisablePath).get(); } catch (ExecutionException ee) { log.error("Error while checking the state of " + "ledger re-replication", ee); @@ -702,13 +751,11 @@ public void notifyLedgerReplicationEnabled(final BookkeeperInternalCallbacks.Gen if (log.isDebugEnabled()) { log.debug("notifyLedgerReplicationEnabled()"); } - - synchronized (this) { - replicationEnabledListener = cb; + synchronized (replicationEnabledCallbacks) { + replicationEnabledCallbacks.add(cb); } - try { - if (!store.exists(basePath + '/' + 
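// Illustrative sketch (not part of the patch): the listener changes in this file replace the single
// replicationEnabledListener / lostBookieRecoveryDelayListener fields with lists of one-shot
// callbacks that are snapshotted and cleared under their own lock before being invoked, so a slow or
// throwing callback cannot corrupt the registration state. The pattern in isolation, with a
// hypothetical Runnable-based callback type:
import java.util.ArrayList;
import java.util.List;

class OneShotCallbacks {
    private final List<Runnable> callbacks = new ArrayList<>();

    void register(Runnable cb) {
        synchronized (callbacks) {
            callbacks.add(cb);
        }
    }

    void fire() {
        final List<Runnable> toRun;
        synchronized (callbacks) {
            toRun = new ArrayList<>(callbacks);  // snapshot ...
            callbacks.clear();                   // ... so each callback fires at most once
        }
        for (Runnable cb : toRun) {
            try {
                cb.run();
            } catch (Exception e) {
                // mirror the patch: log and keep notifying the remaining callbacks
            }
        }
    }
}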
BookKeeperConstants.DISABLE_NODE).get()) { + if (!store.exists(replicationDisablePath).get()) { log.info("LedgerReplication is enabled externally through metadata store, " + "since DISABLE_NODE node is deleted"); cb.operationComplete(0, null); @@ -797,8 +844,8 @@ public int getLostBookieRecoveryDelay() throws ReplicationException.UnavailableE public void notifyLostBookieRecoveryDelayChanged(BookkeeperInternalCallbacks.GenericCallback cb) throws ReplicationException.UnavailableException { log.debug("notifyLostBookieRecoveryDelayChanged()"); - synchronized (this) { - lostBookieRecoveryDelayListener = cb; + synchronized (lostBookieRecoveryDelayCallbacks) { + lostBookieRecoveryDelayCallbacks.add(cb); } try { if (!store.exists(lostBookieRecoveryDelayPath).get()) { @@ -978,11 +1025,13 @@ public long getReplicasCheckCTime() throws ReplicationException.UnavailableExcep @Override public void notifyUnderReplicationLedgerChanged(BookkeeperInternalCallbacks.GenericCallback cb) throws ReplicationException.UnavailableException { - log.debug("notifyUnderReplicationLedgerChanged()"); - store.registerListener(e -> { - if (e.getType() == NotificationType.Deleted && ID_EXTRACTION_PATTERN.matcher(e.getPath()).find()) { - cb.operationComplete(0, null); - } - }); + //The store listener callback executor is metadata-store executor, + //in cb.operationComplete(0, null), it will get all underreplication ledgers from metadata-store, it's sync + //operation. So it's a deadlock. +// store.registerListener(e -> { +// if (e.getType() == NotificationType.Deleted && ID_EXTRACTION_PATTERN.matcher(e.getPath()).find()) { +// cb.operationComplete(0, null); +// } +// }); } } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClient.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClient.java index a32625926e72c..be945d988fb88 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClient.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClient.java @@ -18,13 +18,18 @@ */ package org.apache.pulsar.metadata.bookkeeper; +import static java.util.concurrent.CompletableFuture.completedFuture; +import static java.util.concurrent.CompletableFuture.failedFuture; import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; import static org.apache.bookkeeper.util.BookKeeperConstants.COOKIE_NODE; import static org.apache.bookkeeper.util.BookKeeperConstants.READONLY; +import static org.apache.pulsar.common.util.FutureUtil.Sequencer; +import static org.apache.pulsar.common.util.FutureUtil.waitForAll; import io.netty.util.concurrent.DefaultThreadFactory; import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -42,34 +47,39 @@ import org.apache.bookkeeper.versioning.Version; import org.apache.bookkeeper.versioning.Versioned; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.metadata.api.CacheGetResult; import org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.Notification; -import org.apache.pulsar.metadata.api.NotificationType; +import org.apache.pulsar.metadata.api.extended.SessionEvent; +import org.apache.pulsar.metadata.impl.AbstractMetadataStore; @Slf4j public class PulsarRegistrationClient 
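// Illustrative sketch (not part of the patch): the commented-out registerListener block above is
// disabled because the notification callback runs on the metadata store's own executor, and issuing
// a synchronous metadata read from that thread can deadlock. If such a notification were re-enabled,
// one workaround is to hop to a dedicated executor before doing any blocking work; class and method
// names here are hypothetical.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.pulsar.metadata.api.MetadataStore;
import org.apache.pulsar.metadata.api.NotificationType;

class SafeNotificationHandler {
    private final ExecutorService callbackExecutor = Executors.newSingleThreadExecutor();

    SafeNotificationHandler(MetadataStore store) {
        store.registerListener(n -> {
            if (n.getType() == NotificationType.Deleted) {
                // Re-dispatch: the blocking lookup now runs off the metadata-store callback thread.
                callbackExecutor.execute(() -> handleDeleted(n.getPath()));
            }
        });
    }

    private void handleDeleted(String path) {
        // synchronous metadata reads are safe here
    }
}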
implements RegistrationClient { - private final MetadataStore store; + private final AbstractMetadataStore store; private final String ledgersRootPath; // registration paths private final String bookieRegistrationPath; private final String bookieAllRegistrationPath; private final String bookieReadonlyRegistrationPath; - - private final ConcurrentHashMap> bookieServiceInfoCache = - new ConcurrentHashMap(); private final Set writableBookiesWatchers = new CopyOnWriteArraySet<>(); private final Set readOnlyBookiesWatchers = new CopyOnWriteArraySet<>(); private final MetadataCache bookieServiceInfoMetadataCache; private final ScheduledExecutorService executor; + private final Map> writableBookieInfo; + private final Map> readOnlyBookieInfo; + private final FutureUtil.Sequencer sequencer; + private SessionEvent lastMetadataSessionEvent; public PulsarRegistrationClient(MetadataStore store, String ledgersRootPath) { - this.store = store; + this.store = (AbstractMetadataStore) store; this.ledgersRootPath = ledgersRootPath; this.bookieServiceInfoMetadataCache = store.getMetadataCache(BookieServiceInfoSerde.INSTANCE); - + this.sequencer = Sequencer.create(); + this.writableBookieInfo = new ConcurrentHashMap<>(); + this.readOnlyBookieInfo = new ConcurrentHashMap<>(); // Following Bookie Network Address Changes is an expensive operation // as it requires additional ZooKeeper watches // we can disable this feature, in case the BK cluster has only @@ -77,11 +87,11 @@ public PulsarRegistrationClient(MetadataStore store, this.bookieRegistrationPath = ledgersRootPath + "/" + AVAILABLE_NODE; this.bookieAllRegistrationPath = ledgersRootPath + "/" + COOKIE_NODE; this.bookieReadonlyRegistrationPath = this.bookieRegistrationPath + "/" + READONLY; - this.executor = Executors .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-registration-client")); store.registerListener(this::updatedBookies); + this.store.registerSessionListener(this::refreshBookies); } @Override @@ -89,40 +99,79 @@ public void close() { executor.shutdownNow(); } + private void refreshBookies(SessionEvent sessionEvent) { + lastMetadataSessionEvent = sessionEvent; + if (!SessionEvent.Reconnected.equals(sessionEvent) && !SessionEvent.SessionReestablished.equals(sessionEvent)){ + return; + } + // Clean caches. + store.invalidateCaches(bookieRegistrationPath, bookieAllRegistrationPath, bookieReadonlyRegistrationPath); + bookieServiceInfoMetadataCache.invalidateAll(); + // Refresh caches of the listeners. + getReadOnlyBookies().thenAccept(bookies -> + readOnlyBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies)))); + getWritableBookies().thenAccept(bookies -> + writableBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies)))); + } + @Override public CompletableFuture>> getWritableBookies() { - return getChildren(bookieRegistrationPath); + return getBookiesThenFreshCache(bookieRegistrationPath); } @Override public CompletableFuture>> getAllBookies() { // this method is meant to return all the known bookies, even the bookies // that are not in a running state - return getChildren(bookieAllRegistrationPath); + return getBookiesThenFreshCache(bookieAllRegistrationPath); } @Override public CompletableFuture>> getReadOnlyBookies() { - return getChildren(bookieReadonlyRegistrationPath); + return getBookiesThenFreshCache(bookieReadonlyRegistrationPath); } - private CompletableFuture>> getChildren(String path) { + /** + * @throws IllegalArgumentException if parameter path is null or empty. 
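// Illustrative sketch (not part of the patch): refreshBookies() above reacts to Reconnected /
// SessionReestablished events by dropping the cached children lists (the new
// AbstractMetadataStore.invalidateCaches), clearing the per-bookie cache (the new
// MetadataCache.invalidateAll), and then re-reading the bookie lists so watchers see a fresh view.
// The same flow in isolation, with hypothetical class, field and path names:
import org.apache.pulsar.metadata.api.MetadataCache;
import org.apache.pulsar.metadata.api.extended.SessionEvent;
import org.apache.pulsar.metadata.impl.AbstractMetadataStore;

class BookieCacheRefresher {
    private final AbstractMetadataStore store;
    private final MetadataCache<String> cache;   // stands in for the BookieServiceInfo cache
    private final String writablePath;
    private final String readonlyPath;

    BookieCacheRefresher(AbstractMetadataStore store, MetadataCache<String> cache,
                         String writablePath, String readonlyPath) {
        this.store = store;
        this.cache = cache;
        this.writablePath = writablePath;
        this.readonlyPath = readonlyPath;
        store.registerSessionListener(this::onSessionEvent);
    }

    private void onSessionEvent(SessionEvent event) {
        if (event != SessionEvent.SessionReestablished && event != SessionEvent.Reconnected) {
            return;
        }
        store.invalidateCaches(writablePath, readonlyPath);  // drop cached children lists
        cache.invalidateAll();                               // drop cached per-node payloads
        store.getChildren(writablePath)
                .thenAccept(children -> { /* notify registered watchers with the refreshed list */ });
    }
}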
+ */ + private CompletableFuture>> getBookiesThenFreshCache(String path) { + if (path == null || path.isEmpty()) { + return failedFuture( + new IllegalArgumentException("parameter [path] can not be null or empty.")); + } return store.getChildren(path) .thenComposeAsync(children -> { - Set bookieIds = PulsarRegistrationClient.convertToBookieAddresses(children); - List> bookieInfoUpdated = - new ArrayList<>(bookieIds.size()); + final Set bookieIds = PulsarRegistrationClient.convertToBookieAddresses(children); + final List> bookieInfoUpdated = new ArrayList<>(bookieIds.size()); for (BookieId id : bookieIds) { // update the cache for new bookies - if (!bookieServiceInfoCache.containsKey(id)) { - bookieInfoUpdated.add(readBookieServiceInfoAsync(id)); + if (path.equals(bookieReadonlyRegistrationPath) && readOnlyBookieInfo.get(id) == null) { + bookieInfoUpdated.add(readBookieInfoAsReadonlyBookie(id)); + continue; + } + if (path.equals(bookieRegistrationPath) && writableBookieInfo.get(id) == null) { + bookieInfoUpdated.add(readBookieInfoAsWritableBookie(id)); + continue; + } + if (path.equals(bookieAllRegistrationPath)) { + if (writableBookieInfo.get(id) != null || readOnlyBookieInfo.get(id) != null) { + // jump to next bookie id + continue; + } + // check writable first + final CompletableFuture revalidateAllBookiesFuture = readBookieInfoAsWritableBookie(id) + .thenCompose(writableBookieInfo -> writableBookieInfo + .>>>map( + bookieServiceInfo -> completedFuture(null)) + // check read-only then + .orElseGet(() -> readBookieInfoAsReadonlyBookie(id))); + bookieInfoUpdated.add(revalidateAllBookiesFuture); } } if (bookieInfoUpdated.isEmpty()) { - return CompletableFuture.completedFuture(bookieIds); + return completedFuture(bookieIds); } else { - return FutureUtil - .waitForAll(bookieInfoUpdated) + return waitForAll(bookieInfoUpdated) .thenApply(___ -> bookieIds); } }) @@ -153,42 +202,67 @@ public void unwatchReadOnlyBookies(RegistrationListener registrationListener) { readOnlyBookiesWatchers.remove(registrationListener); } - private void handleDeletedBookieNode(Notification n) { - if (n.getType() == NotificationType.Deleted) { - BookieId bookieId = stripBookieIdFromPath(n.getPath()); - if (bookieId != null) { - log.info("Bookie {} disappeared", bookieId); - bookieServiceInfoCache.remove(bookieId); - } + /** + * This method will receive metadata store notifications and then update the + * local cache in background sequentially. + */ + private void updatedBookies(Notification n) { + // make the notification callback run sequential in background. 
+ final String path = n.getPath(); + if (!path.startsWith(bookieReadonlyRegistrationPath) && !path.startsWith(bookieRegistrationPath)) { + // ignore unknown path + return; } - } - - private void handleUpdatedBookieNode(Notification n) { - BookieId bookieId = stripBookieIdFromPath(n.getPath()); - if (bookieId != null) { - log.info("Bookie {} info updated", bookieId); - readBookieServiceInfoAsync(bookieId); + if (path.equals(bookieReadonlyRegistrationPath) || path.equals(bookieRegistrationPath)) { + // ignore root path + return; } - } - - private void updatedBookies(Notification n) { - if (n.getType() == NotificationType.Created || n.getType() == NotificationType.Deleted) { - if (n.getPath().startsWith(bookieReadonlyRegistrationPath)) { - getReadOnlyBookies().thenAccept(bookies -> { - readOnlyBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies))); - }); - handleDeletedBookieNode(n); - } else if (n.getPath().startsWith(bookieRegistrationPath)) { - getWritableBookies().thenAccept(bookies -> - writableBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies)))); - handleDeletedBookieNode(n); - } - } else if (n.getType() == NotificationType.Modified) { - if (n.getPath().startsWith(bookieReadonlyRegistrationPath) - || n.getPath().startsWith(bookieRegistrationPath)) { - handleUpdatedBookieNode(n); + final BookieId bookieId = stripBookieIdFromPath(n.getPath()); + sequencer.sequential(() -> { + switch (n.getType()) { + case Created: + log.info("Bookie {} created. path: {}", bookieId, n.getPath()); + if (path.startsWith(bookieReadonlyRegistrationPath)) { + return getReadOnlyBookies().thenAccept(bookies -> + readOnlyBookiesWatchers.forEach(w -> + executor.execute(() -> w.onBookiesChanged(bookies)))); + } + return getWritableBookies().thenAccept(bookies -> + writableBookiesWatchers.forEach(w -> + executor.execute(() -> w.onBookiesChanged(bookies)))); + case Modified: + if (bookieId == null) { + return completedFuture(null); + } + log.info("Bookie {} modified. path: {}", bookieId, n.getPath()); + if (path.startsWith(bookieReadonlyRegistrationPath)) { + return readBookieInfoAsReadonlyBookie(bookieId).thenApply(__ -> null); + } + return readBookieInfoAsWritableBookie(bookieId).thenApply(__ -> null); + case Deleted: + if (bookieId == null) { + return completedFuture(null); + } + log.info("Bookie {} deleted. 
path: {}", bookieId, n.getPath()); + if (path.startsWith(bookieReadonlyRegistrationPath)) { + readOnlyBookieInfo.remove(bookieId); + return getReadOnlyBookies().thenAccept(bookies -> { + readOnlyBookiesWatchers.forEach(w -> + executor.execute(() -> w.onBookiesChanged(bookies))); + }); + } + if (path.startsWith(bookieRegistrationPath)) { + writableBookieInfo.remove(bookieId); + return getWritableBookies().thenAccept(bookies -> { + writableBookiesWatchers.forEach(w -> + executor.execute(() -> w.onBookiesChanged(bookies))); + }); + } + return completedFuture(null); + default: + return completedFuture(null); } - } + }); } private static BookieId stripBookieIdFromPath(String path) { @@ -200,7 +274,7 @@ private static BookieId stripBookieIdFromPath(String path) { try { return BookieId.parse(path.substring(slash + 1)); } catch (IllegalArgumentException e) { - log.warn("Cannot decode bookieId from {}", path, e); + log.warn("Cannot decode bookieId from {}, error: {}", path, e.getMessage()); } } return null; @@ -227,46 +301,48 @@ public CompletableFuture> getBookieServiceInfo(Book // this is because there are a few cases in which some operations on the main thread // wait for the result. This is due to the fact that resolving the address of a bookie // is needed in many code paths. - Versioned resultFromCache = bookieServiceInfoCache.get(bookieId); + Versioned info; + if ((info = writableBookieInfo.get(bookieId)) == null) { + info = readOnlyBookieInfo.get(bookieId); + } if (log.isDebugEnabled()) { - log.debug("getBookieServiceInfo {} -> {}", bookieId, resultFromCache); + log.debug("getBookieServiceInfo {} -> {}", bookieId, info); } - if (resultFromCache != null) { - return CompletableFuture.completedFuture(resultFromCache); + if (info != null) { + return completedFuture(info); } else { return FutureUtils.exception(new BKException.BKBookieHandleNotAvailableException()); } } - public CompletableFuture readBookieServiceInfoAsync(BookieId bookieId) { - String asWritable = bookieRegistrationPath + "/" + bookieId; - return bookieServiceInfoMetadataCache.get(asWritable) - .thenCompose((Optional getResult) -> { - if (getResult.isPresent()) { - Versioned res = - new Versioned<>(getResult.get(), new LongVersion(-1)); - log.info("Update BookieInfoCache (writable bookie) {} -> {}", bookieId, getResult.get()); - bookieServiceInfoCache.put(bookieId, res); - return CompletableFuture.completedFuture(null); - } else { - return readBookieInfoAsReadonlyBookie(bookieId); - } - } - ); + public CompletableFuture>> readBookieInfoAsWritableBookie( + BookieId bookieId) { + final String asWritable = bookieRegistrationPath + "/" + bookieId; + return bookieServiceInfoMetadataCache.getWithStats(asWritable) + .thenApply((Optional> bkInfoWithStats) -> { + if (bkInfoWithStats.isPresent()) { + final CacheGetResult r = bkInfoWithStats.get(); + log.info("Update BookieInfoCache (writable bookie) {} -> {}", bookieId, r.getValue()); + writableBookieInfo.put(bookieId, + new Versioned<>(r.getValue(), new LongVersion(r.getStat().getVersion()))); + } + return bkInfoWithStats; + } + ); } - final CompletableFuture readBookieInfoAsReadonlyBookie(BookieId bookieId) { - String asReadonly = bookieReadonlyRegistrationPath + "/" + bookieId; - return bookieServiceInfoMetadataCache.get(asReadonly) - .thenApply((Optional getResultAsReadOnly) -> { - if (getResultAsReadOnly.isPresent()) { - Versioned res = - new Versioned<>(getResultAsReadOnly.get(), new LongVersion(-1)); - log.info("Update BookieInfoCache (readonly bookie) {} -> {}", bookieId, - 
getResultAsReadOnly.get()); - bookieServiceInfoCache.put(bookieId, res); + final CompletableFuture>> readBookieInfoAsReadonlyBookie( + BookieId bookieId) { + final String asReadonly = bookieReadonlyRegistrationPath + "/" + bookieId; + return bookieServiceInfoMetadataCache.getWithStats(asReadonly) + .thenApply((Optional> bkInfoWithStats) -> { + if (bkInfoWithStats.isPresent()) { + final CacheGetResult r = bkInfoWithStats.get(); + log.info("Update BookieInfoCache (readonly bookie) {} -> {}", bookieId, r.getValue()); + readOnlyBookieInfo.put(bookieId, + new Versioned<>(r.getValue(), new LongVersion(r.getStat().getVersion()))); } - return null; + return bkInfoWithStats; }); } } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java index 32ec89e717e0e..25c3f10aa18fa 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java @@ -135,7 +135,7 @@ public void registerBookie(BookieId bookieId, boolean readOnly, BookieServiceInf if (readOnly) { ResourceLock rwRegistration = bookieRegistration.remove(bookieId); if (rwRegistration != null) { - log.info("Bookie {} was already registered as writable, unregistering"); + log.info("Bookie {} was already registered as writable, unregistering", bookieId); rwRegistration.release().get(); } @@ -144,7 +144,7 @@ public void registerBookie(BookieId bookieId, boolean readOnly, BookieServiceInf } else { ResourceLock roRegistration = bookieRegistrationReadOnly.remove(bookieId); if (roRegistration != null) { - log.info("Bookie {} was already registered as read-only, unregistering"); + log.info("Bookie {} was already registered as read-only, unregistering", bookieId); roRegistration.release().get(); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java index 072d513cca962..cc148c2a3117a 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java @@ -23,6 +23,7 @@ import com.github.benmanes.caffeine.cache.AsyncCacheLoader; import com.github.benmanes.caffeine.cache.AsyncLoadingCache; import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; import com.google.common.annotations.VisibleForTesting; import io.netty.util.concurrent.DefaultThreadFactory; import java.time.Instant; @@ -41,6 +42,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import lombok.Getter; import lombok.extern.slf4j.Slf4j; @@ -328,6 +330,7 @@ public void accept(Notification n) { if (type == NotificationType.Created || type == NotificationType.Deleted) { existsCache.synchronous().invalidate(path); + childrenCache.synchronous().invalidate(path); String parent = parent(path); if (parent != null) { childrenCache.synchronous().invalidate(parent); @@ -385,6 +388,7 @@ private CompletableFuture deleteInternal(String path, Optional expec // Ensure caches are invalidated before the operation is confirmed return storeDelete(path, 
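// Illustrative sketch (not part of the patch): readBookieInfoAsWritableBookie /
// readBookieInfoAsReadonlyBookie above switch from MetadataCache.get() to getWithStats() so the
// cached BookieServiceInfo is stored with its real metadata version instead of a hard-coded
// LongVersion(-1). The core of that conversion, with a String payload standing in for
// BookieServiceInfo:
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.pulsar.metadata.api.CacheGetResult;
import org.apache.pulsar.metadata.api.MetadataCache;

class VersionedCacheRead {
    static CompletableFuture<Optional<Versioned<String>>> read(MetadataCache<String> cache, String path) {
        return cache.getWithStats(path)
                .thenApply(opt -> opt.map((CacheGetResult<String> r) ->
                        // Keep the store's version so later compare-and-set style operations can use it.
                        new Versioned<>(r.getValue(), new LongVersion(r.getStat().getVersion()))));
    }
}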
expectedVersion).thenRun(() -> { existsCache.synchronous().invalidate(path); + childrenCache.synchronous().invalidate(path); String parent = parent(path); if (parent != null) { childrenCache.synchronous().invalidate(parent); @@ -520,6 +524,13 @@ public void invalidateAll() { existsCache.synchronous().invalidateAll(); } + public void invalidateCaches(String...paths) { + LoadingCache> loadingCache = childrenCache.synchronous(); + for (String path : paths) { + loadingCache.invalidate(path); + } + } + /** * Run the task in the executor thread and fail the future if the executor is shutting down. */ @@ -532,6 +543,18 @@ public void execute(Runnable task, CompletableFuture future) { } } + /** + * Run the task in the executor thread and fail the future if the executor is shutting down. + */ + @VisibleForTesting + public void execute(Runnable task, Supplier>> futures) { + try { + executor.execute(task); + } catch (final Throwable t) { + futures.get().forEach(f -> f.completeExceptionally(t)); + } + } + protected static String parent(String path) { int idx = path.lastIndexOf('/'); if (idx <= 0) { diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java index a6d8eb8344c96..079ae3e2ae5c3 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java @@ -204,29 +204,29 @@ protected void batchOperation(List ops) { } // Trigger all the futures in the batch - for (int i = 0; i < ops.size(); i++) { - OpResult opr = results.get(i); - MetadataOp op = ops.get(i); - - switch (op.getType()) { - case PUT: - handlePutResult(op.asPut(), opr); - break; - case DELETE: - handleDeleteResult(op.asDelete(), opr); - break; - case GET: - handleGetResult(op.asGet(), opr); - break; - case GET_CHILDREN: - handleGetChildrenResult(op.asGetChildren(), opr); - break; - - default: - op.getFuture().completeExceptionally(new MetadataStoreException( - "Operation type not supported in multi: " + op.getType())); - } - } + execute(() -> { + for (int i = 0; i < ops.size(); i++) { + OpResult opr = results.get(i); + MetadataOp op = ops.get(i); + switch (op.getType()) { + case PUT: + handlePutResult(op.asPut(), opr); + break; + case DELETE: + handleDeleteResult(op.asDelete(), opr); + break; + case GET: + handleGetResult(op.asGet(), opr); + break; + case GET_CHILDREN: + handleGetChildrenResult(op.asGetChildren(), opr); + break; + default: + op.getFuture().completeExceptionally(new MetadataStoreException( + "Operation type not supported in multi: " + op.getType())); + } + } + }, () -> ops.stream().map(MetadataOp::getFuture).collect(Collectors.toList())); }, null); } catch (Throwable t) { ops.forEach(o -> o.getFuture().completeExceptionally(new MetadataStoreException(t))); diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java index 1ce01f57d4fbe..a840721023080 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java @@ -81,7 +81,11 @@ public void close() throws Exception { } // task that runs every TICK_TIME to check zk connection - private synchronized void checkConnectionStatus() { + // NOT ThreadSafe: + // If zk client can't ensure the order, 
it may lead to problems. + // Currently, we only use it in a single thread, so it is fine, but we shouldn't leave any potential problems + // in the future. + private void checkConnectionStatus() { try { CompletableFuture future = new CompletableFuture<>(); zk.exists("/", false, (StatCallback) (rc, path, ctx, stat) -> { @@ -126,7 +130,7 @@ synchronized void setSessionInvalid() { currentStatus = SessionEvent.SessionLost; } - private void checkState(Watcher.Event.KeeperState zkClientState) { + private synchronized void checkState(Watcher.Event.KeeperState zkClientState) { switch (zkClientState) { case Expired: if (currentStatus != SessionEvent.SessionLost) { diff --git a/pulsar-metadata/src/main/resources/findbugsExclude.xml b/pulsar-metadata/src/main/resources/findbugsExclude.xml index 21578b9f8cb5c..d6eb8623eb73e 100644 --- a/pulsar-metadata/src/main/resources/findbugsExclude.xml +++ b/pulsar-metadata/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - + + + + + + + + + + + + diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java new file mode 100644 index 0000000000000..d30af4de6a803 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.testng.AssertJUnit.assertTrue; +import com.beust.jcommander.internal.Lists; +import com.beust.jcommander.internal.Sets; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.stats.OpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.versioning.LongVersion; +import org.apache.bookkeeper.versioning.Versioned; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorBookieCheckTask}. + */ +public class AuditorBookieCheckTaskTest { + + private AuditorStats auditorStats; + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager underreplicationManager; + private BookieLedgerIndexer ledgerIndexer; + private AuditorBookieCheckTask bookieCheckTask; + private final AtomicBoolean shutdownCompleted = new AtomicBoolean(false); + private final AuditorTask.ShutdownTaskHandler shutdownTaskHandler = () -> shutdownCompleted.set(true); + private long startLedgerId = 0; + + @BeforeMethod + public void setup() { + ServerConfiguration conf = mock(ServerConfiguration.class); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + this.auditorStats = spy(auditorStats); + admin = mock(BookKeeperAdmin.class); + ledgerManager = mock(LedgerManager.class); + underreplicationManager = mock(LedgerUnderreplicationManager.class); + ledgerIndexer = mock(BookieLedgerIndexer.class); + AuditorBookieCheckTask bookieCheckTask1 = new AuditorBookieCheckTask( + conf, this.auditorStats, admin, ledgerManager, underreplicationManager, + shutdownTaskHandler, ledgerIndexer, null, null); + bookieCheckTask = spy(bookieCheckTask1); + } + + @Test + public void testShutdownAuditBookiesException() + throws BKException, ReplicationException.BKAuditException, InterruptedException { + doThrow(new ReplicationException.BKAuditException("test failed")) + .when(bookieCheckTask) + .auditBookies(); + bookieCheckTask.startAudit(true); + + assertTrue("shutdownTaskHandler should be executed.", shutdownCompleted.get()); + } + + @Test + public void testAuditBookies() + throws ReplicationException.UnavailableException, ReplicationException.BKAuditException, BKException { + final String bookieId1 = "127.0.0.1:1000"; + final String bookieId2 = 
"127.0.0.1:1001"; + final long bookie1LedgersCount = 10; + final long bookie2LedgersCount = 20; + + final Map> bookiesAndLedgers = new HashMap<>(); + bookiesAndLedgers.put(bookieId1, getLedgers(bookie1LedgersCount)); + bookiesAndLedgers.put(bookieId2, getLedgers(bookie2LedgersCount)); + when(ledgerIndexer.getBookieToLedgerIndex()).thenReturn(bookiesAndLedgers); + when(underreplicationManager.isLedgerReplicationEnabled()).thenReturn(true); + + CompletableFuture> metaPromise = new CompletableFuture<>(); + final LongVersion version = mock(LongVersion.class); + final LedgerMetadata metadata = mock(LedgerMetadata.class); + metaPromise.complete(new Versioned<>(metadata, version)); + when(ledgerManager.readLedgerMetadata(anyLong())).thenReturn(metaPromise); + + CompletableFuture markPromise = new CompletableFuture<>(); + markPromise.complete(null); + when(underreplicationManager.markLedgerUnderreplicatedAsync(anyLong(), anyCollection())) + .thenReturn(markPromise); + + OpStatsLogger numUnderReplicatedLedgerStats = mock(OpStatsLogger.class); + when(auditorStats.getNumUnderReplicatedLedger()).thenReturn(numUnderReplicatedLedgerStats); + + final List availableBookies = Lists.newArrayList(); + final List readOnlyBookies = Lists.newArrayList(); + // test bookie1 lost + availableBookies.add(BookieId.parse(bookieId2)); + when(admin.getAvailableBookies()).thenReturn(availableBookies); + when(admin.getReadOnlyBookies()).thenReturn(readOnlyBookies); + bookieCheckTask.startAudit(true); + verify(numUnderReplicatedLedgerStats, times(1)) + .registerSuccessfulValue(eq(bookie1LedgersCount)); + + // test bookie2 lost + numUnderReplicatedLedgerStats = mock(OpStatsLogger.class); + when(auditorStats.getNumUnderReplicatedLedger()).thenReturn(numUnderReplicatedLedgerStats); + availableBookies.clear(); + availableBookies.add(BookieId.parse(bookieId1)); + bookieCheckTask.startAudit(true); + verify(numUnderReplicatedLedgerStats, times(1)) + .registerSuccessfulValue(eq(bookie2LedgersCount)); + + } + + private Set getLedgers(long count) { + final Set ledgers = Sets.newHashSet(); + for (int i = 0; i < count; i++) { + ledgers.add(i + startLedgerId++); + } + return ledgers; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java new file mode 100644 index 0000000000000..14cdf3e1fc29c --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertSame; +import static org.testng.AssertJUnit.assertTrue; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerAuditorManager; +import org.apache.zookeeper.ZooKeeper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies the auditor bookie scenarios which will be monitoring the + * bookie failures. + */ +public class AuditorBookieTest extends BookKeeperClusterTestCase { + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // private static final Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(AuditorBookieTest.class); + private String electionPath; + private HashMap auditorElectors = new HashMap(); + private List zkClients = new LinkedList(); + + public AuditorBookieTest() throws Exception { + super(6); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + electionPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + + "/underreplication/" + PulsarLedgerAuditorManager.ELECTION_PATH; + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + stopAuditorElectors(); + for (ZooKeeper zk : zkClients) { + zk.close(); + } + zkClients.clear(); + super.tearDown(); + } + + /** + * Test should ensure only one should act as Auditor. Starting/shutdown + * other than auditor bookie shouldn't initiate re-election and multiple + * auditors. + */ + @Test + public void testEnsureOnlySingleAuditor() throws Exception { + BookieServer auditor = verifyAuditor(); + + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(auditor); + int bkIndexDownBookie; + if (indexOf < lastBookieIndex()) { + bkIndexDownBookie = indexOf + 1; + } else { + bkIndexDownBookie = indexOf - 1; + } + shutdownBookie(serverByIndex(bkIndexDownBookie)); + + startNewBookie(); + startNewBookie(); + // grace period for the auditor re-election if any + BookieServer newAuditor = waitForNewAuditor(auditor); + assertSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + } + + /** + * Test Auditor crashes should trigger re-election and another bookie should + * take over the auditor ship. 
+ */ + @Test + public void testSuccessiveAuditorCrashes() throws Exception { + BookieServer auditor = verifyAuditor(); + shutdownBookie(auditor); + + BookieServer newAuditor1 = waitForNewAuditor(auditor); + shutdownBookie(newAuditor1); + BookieServer newAuditor2 = waitForNewAuditor(newAuditor1); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor2); + } + + /** + * Test restarting the entire bookie cluster. It shouldn't create multiple + * bookie auditors. + */ + @Test + public void testBookieClusterRestart() throws Exception { + BookieServer auditor = verifyAuditor(); + for (AuditorElector auditorElector : auditorElectors.values()) { + assertTrue("Auditor elector is not running!", auditorElector + .isRunning()); + } + stopBKCluster(); + stopAuditorElectors(); + + startBKCluster(zkUtil.getMetadataServiceUri()); + //startBKCluster(zkUtil.getMetadataServiceUri()) override the base conf metadataServiceUri + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + } + + /** + * Test the vote is deleting from the ZooKeeper during shutdown. + */ + @Test + public void testShutdown() throws Exception { + BookieServer auditor = verifyAuditor(); + shutdownBookie(auditor); + + // waiting for new auditor + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + + List children = zkc.getChildren(electionPath, false); + for (String child : children) { + byte[] data = zkc.getData(electionPath + '/' + child, false, null); + String bookieIP = new String(data); + String addr = auditor.getBookieId().toString(); + assertFalse("AuditorElection cleanup fails", bookieIP + .contains(addr)); + } + } + + /** + * Test restart of the previous Auditor bookie shouldn't initiate + * re-election and should create new vote after restarting. + */ + @Test + public void testRestartAuditorBookieAfterCrashing() throws Exception { + BookieServer auditor = verifyAuditor(); + + String addr = auditor.getBookieId().toString(); + + // restarting Bookie with same configurations. 
+ ServerConfiguration serverConfiguration = shutdownBookie(auditor); + + auditorElectors.remove(addr); + startBookie(serverConfiguration); + // starting corresponding auditor elector + + if (LOG.isDebugEnabled()) { + LOG.debug("Performing Auditor Election:" + addr); + } + startAuditorElector(addr); + + // waiting for new auditor to come + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + assertFalse("No relection after old auditor rejoins", auditor + .getBookieId().equals(newAuditor.getBookieId())); + } + + private void startAuditorElector(String addr) throws Exception { + AuditorElector auditorElector = new AuditorElector(addr, + baseConf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + + private void startAuditorElectors() throws Exception { + for (BookieId addr : bookieAddresses()) { + startAuditorElector(addr.toString()); + } + } + + private void stopAuditorElectors() throws Exception { + for (AuditorElector auditorElector : auditorElectors.values()) { + auditorElector.shutdown(); + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping Auditor Elector!"); + } + } + } + + private BookieServer verifyAuditor() throws Exception { + List auditors = getAuditorBookie(); + assertEquals("Multiple Bookies acting as Auditor!", 1, auditors + .size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Bookie running as Auditor:" + auditors.get(0)); + } + return auditors.get(0); + } + + private List getAuditorBookie() throws Exception { + List auditors = new LinkedList(); + byte[] data = zkc.getData(electionPath, false, null); + assertNotNull("Auditor election failed", data); + for (int i = 0; i < bookieCount(); i++) { + BookieServer bks = serverByIndex(i); + if (new String(data).contains(bks.getBookieId() + "")) { + auditors.add(bks); + } + } + return auditors; + } + + private ServerConfiguration shutdownBookie(BookieServer bkServer) throws Exception { + int index = indexOfServer(bkServer); + String addr = addressByIndex(index).toString(); + if (LOG.isDebugEnabled()) { + LOG.debug("Shutting down bookie:" + addr); + } + + // shutdown bookie which is an auditor + ServerConfiguration conf = killBookie(index); + + // stopping corresponding auditor elector + auditorElectors.get(addr).shutdown(); + return conf; + } + + private BookieServer waitForNewAuditor(BookieServer auditor) + throws Exception { + BookieServer newAuditor = null; + int retryCount = 8; + while (retryCount > 0) { + try { + List auditors = getAuditorBookie(); + if (auditors.size() > 0) { + newAuditor = auditors.get(0); + if (auditor != newAuditor) { + break; + } + } + } catch (Exception ignore) { + } + + Thread.sleep(500); + retryCount--; + } + assertNotNull( + "New Auditor is not reelected after auditor crashes", + newAuditor); + verifyAuditor(); + return newAuditor; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java new file mode 100644 index 0000000000000..6b58c72af0766 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorCheckAllLedgersTask}. 
+ */ +public class AuditorCheckAllLedgersTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorCheckAllLedgersTaskTest.class); + + private static final int maxNumberOfConcurrentOpenLedgerOperations = 500; + private static final int acquireConcurrentOpenLedgerOperationsTimeoutMSec = 120000; + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorCheckAllLedgersTaskTest() { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = MetadataStoreExtended.create(storeUri, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + PulsarLedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); + + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + ledgerManagerFactory.initialize(conf, layoutManager, 1); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + + baseConf.setAuditorMaxNumberOfConcurrentOpenLedgerOperations(maxNumberOfConcurrentOpenLedgerOperations); + baseConf.setAuditorAcquireConcurrentOpenLedgerOperationsTimeoutMSec( + acquireConcurrentOpenLedgerOperationsTimeoutMSec); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + if (admin != null) { + admin.close(); + } + super.tearDown(); + } + + @Test + public void testCheckAllLedgers() throws Exception { + // 1. create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + // 2. init CheckAllLedgersTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + + AuditorCheckAllLedgersTask auditorCheckAllLedgersTask = new AuditorCheckAllLedgersTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. checkAllLedgers + auditorCheckAllLedgersTask.runTask(); + + // 4. 
verify + assertEquals("CHECK_ALL_LEDGERS_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.CHECK_ALL_LEDGERS_TIME)).getSuccessCount()); + assertEquals("NUM_LEDGERS_CHECKED", numLedgers, + (long) statsLogger.getCounter(ReplicationStats.NUM_LEDGERS_CHECKED).get()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java new file mode 100644 index 0000000000000..970c45964f784 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java @@ -0,0 +1,1138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import lombok.Cleanup; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.AsyncCallback.AddCallback; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.UnderreplicatedLedger; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import 
org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerAuditorManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests publishing of under replicated ledgers by the Auditor bookie node when + * the corresponding bookies are identified as not running. + */ +public class AuditorLedgerCheckerTest extends BookKeeperClusterTestCase { + + // Depending on the taste, select the amount of logging + // by uncommenting one of the two lines below + // private static final Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(AuditorLedgerCheckerTest.class); + + private static final byte[] ledgerPassword = "aaa".getBytes(); + private Random rng; // Random Number Generator + + private DigestType digestType; + + private String underreplicatedPath; + private Map auditorElectors = new ConcurrentHashMap<>(); + private LedgerUnderreplicationManager urLedgerMgr; + + private Set urLedgerList; + private String electionPath; + + private List ledgerList; + + public AuditorLedgerCheckerTest() + throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + AuditorLedgerCheckerTest(String ledgerManagerFactoryClass) + throws Exception { + super(3); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactoryClass); + this.digestType = DigestType.CRC32; + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactoryClass); + baseClientConf + .setLedgerManagerFactoryClassName(ledgerManagerFactoryClass); + } + + @BeforeMethod + public void setUp() throws Exception { + super.setUp(); + underreplicatedPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf) + + "/underreplication/ledgers"; + electionPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + + "/underreplication/" + PulsarLedgerAuditorManager.ELECTION_PATH; + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = MetadataStoreExtended.create(storeUri, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + PulsarLedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + ledgerManagerFactory.initialize(conf, 
layoutManager, 1); + urLedgerMgr = ledgerManagerFactory.newLedgerUnderreplicationManager(); + urLedgerMgr.setCheckAllLedgersCTime(System.currentTimeMillis()); + + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + rng = new Random(System.currentTimeMillis()); // Initialize the Random + urLedgerList = new HashSet(); + ledgerList = new ArrayList(2); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + stopAuditorElectors(); + super.tearDown(); + } + + private void startAuditorElectors() throws Exception { + for (String addr : bookieAddresses().stream().map(Object::toString) + .collect(Collectors.toList())) { + AuditorElector auditorElector = new AuditorElector(addr, baseConf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + } + + private void stopAuditorElectors() throws Exception { + for (AuditorElector auditorElector : auditorElectors.values()) { + auditorElector.shutdown(); + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping Auditor Elector!"); + } + } + } + + /** + * Test publishing of under replicated ledgers by the auditor bookie. + */ + @Test + public void testSimpleLedger() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int bkShutdownIndex = lastBookieIndex(); + String shutdownBookie = shutdownBookie(bkShutdownIndex); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + waitForAuditToComplete(); + underReplicaLatch.await(5, TimeUnit.SECONDS); + Map urLedgerData = getUrLedgerData(urLedgerList); + assertEquals("Missed identifying under replicated ledgers", 1, + urLedgerList.size()); + + /* + * Sample data format present in the under replicated ledger path + * + * {4=replica: "10.18.89.153:5002"} + */ + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + String data = urLedgerData.get(ledgerId); + assertTrue("Bookie " + shutdownBookie + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookie)); + } + + /** + * Test once published under replicated ledger should exists even after + * restarting respective bookie. + */ + @Test + public void testRestartBookie() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + LedgerHandle lh2 = createAndAddEntriesToLedger(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : {}, {}", lh1, lh2); + } + + int bkShutdownIndex = lastBookieIndex(); + ServerConfiguration bookieConf1 = confByIndex(bkShutdownIndex); + String shutdownBookie = shutdownBookie(bkShutdownIndex); + + // restart the failed bookie + startAndAddBookie(bookieConf1); + + waitForLedgerMissingReplicas(lh1.getId(), 10, shutdownBookie); + waitForLedgerMissingReplicas(lh2.getId(), 10, shutdownBookie); + } + + /** + * Test publishing of under replicated ledgers when multiple bookie failures + * one after another. 
+ */ + @Test + public void testMultipleBookieFailures() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + + // failing first bookie + shutdownBookie(lastBookieIndex()); + + // simulate re-replication + doLedgerRereplication(lh1.getId()); + + // failing another bookie + String shutdownBookie = shutdownBookie(lastBookieIndex()); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertTrue("Ledger should be missing second replica", + waitForLedgerMissingReplicas(lh1.getId(), 10, shutdownBookie)); + } + + @Test + public void testToggleLedgerReplication() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + ledgerList.add(lh1.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + // failing another bookie + CountDownLatch urReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // disabling ledger replication + urLedgerMgr.disableLedgerReplication(); + ArrayList shutdownBookieList = new ArrayList(); + shutdownBookieList.add(shutdownBookie(lastBookieIndex())); + shutdownBookieList.add(shutdownBookie(lastBookieIndex())); + + assertFalse("Ledger replication is not disabled!", urReplicaLatch + .await(1, TimeUnit.SECONDS)); + + // enabling ledger replication + urLedgerMgr.enableLedgerReplication(); + assertTrue("Ledger replication is not enabled!", urReplicaLatch.await( + 5, TimeUnit.SECONDS)); + } + + @Test + public void testDuplicateEnDisableAutoRecovery() throws Exception { + urLedgerMgr.disableLedgerReplication(); + try { + urLedgerMgr.disableLedgerReplication(); + fail("Must throw exception, since AutoRecovery is already disabled"); + } catch (UnavailableException e) { + assertTrue("AutoRecovery is not disabled previously!", + e.getCause().getCause() instanceof MetadataStoreException.BadVersionException); + } + urLedgerMgr.enableLedgerReplication(); + try { + urLedgerMgr.enableLedgerReplication(); + fail("Must throw exception, since AutoRecovery is already enabled"); + } catch (UnavailableException e) { + assertTrue("AutoRecovery is not enabled previously!", + e.getCause().getCause() instanceof MetadataStoreException.NotFoundException); + } + } + + /** + * Test Auditor should consider Readonly bookie as available bookie. Should not publish ur ledgers for + * readonly bookies. + */ + @Test + public void testReadOnlyBookieExclusionFromURLedgersCheck() throws Exception { + LedgerHandle lh = createAndAddEntriesToLedger(); + ledgerList.add(lh.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + int count = ledgerList.size(); + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(count); + + final int bkIndex = 2; + ServerConfiguration bookieConf = confByIndex(bkIndex); + BookieServer bk = serverByIndex(bkIndex); + bookieConf.setReadOnlyModeEnabled(true); + + ((BookieImpl) bk.getBookie()).getStateManager().doTransitionToReadOnlyMode(); + bkc.waitForReadOnlyBookie(BookieImpl.getBookieId(confByIndex(bkIndex))) + .get(30, TimeUnit.SECONDS); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for Auditor to finish ledger check."); + } + waitForAuditToComplete(); + assertFalse("latch should not have completed", underReplicaLatch.await(5, TimeUnit.SECONDS)); + } + + /** + * Test Auditor should consider Readonly bookie fail and publish ur ledgers for readonly bookies. 
+ */ + @Test + public void testReadOnlyBookieShutdown() throws Exception { + LedgerHandle lh = createAndAddEntriesToLedger(); + long ledgerId = lh.getId(); + ledgerList.add(ledgerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + int count = ledgerList.size(); + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(count); + + int bkIndex = lastBookieIndex(); + if (LOG.isDebugEnabled()) { + LOG.debug("Moving bookie {} {} to read only...", bkIndex, serverByIndex(bkIndex)); + } + ServerConfiguration bookieConf = confByIndex(bkIndex); + BookieServer bk = serverByIndex(bkIndex); + bookieConf.setReadOnlyModeEnabled(true); + + ((BookieImpl) bk.getBookie()).getStateManager().doTransitionToReadOnlyMode(); + bkc.waitForReadOnlyBookie(BookieImpl.getBookieId(confByIndex(bkIndex))) + .get(30, TimeUnit.SECONDS); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for Auditor to finish ledger check."); + } + waitForAuditToComplete(); + assertFalse("latch should not have completed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + + String shutdownBookie = shutdownBookie(bkIndex); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + waitForAuditToComplete(); + underReplicaLatch.await(5, TimeUnit.SECONDS); + Map urLedgerData = getUrLedgerData(urLedgerList); + assertEquals("Missed identifying under replicated ledgers", 1, urLedgerList.size()); + + /* + * Sample data format present in the under replicated ledger path + * + * {4=replica: "10.18.89.153:5002"} + */ + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, urLedgerList.contains(ledgerId)); + String data = urLedgerData.get(ledgerId); + assertTrue("Bookie " + shutdownBookie + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookie)); + } + + public void testInnerDelayedAuditOfLostBookies() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(5); + + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(4, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for another 5 seconds for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + 
"is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + /** + * Test publishing of under replicated ledgers by the auditor + * bookie is delayed if LostBookieRecoveryDelay option is set. + */ + @Test + public void testDelayedAuditOfLostBookies() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + testInnerDelayedAuditOfLostBookies(); + } + + /** + * Test publishing of under replicated ledgers by the auditor + * bookie is delayed if LostBookieRecoveryDelay option is set + * and it continues to be delayed even when periodic bookie check + * is set to run every 2 secs. I.e. periodic bookie check doesn't + * override the delay + */ + @Test + public void testDelayedAuditWithPeriodicBookieCheck() throws Exception { + // enable periodic bookie check on a cadence of every 2 seconds. + // this requires us to stop the auditor/auditorElectors, set the + // periodic check interval and restart the auditorElectors + stopAuditorElectors(); + baseConf.setAuditorPeriodicBookieCheckInterval(2); + startAuditorElectors(); + + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + // the delaying of audit should just work despite the fact + // we have enabled periodic bookie check + testInnerDelayedAuditOfLostBookies(); + } + + @Test + public void testRescheduleOfDelayedAuditOfLostBookiesToStartImmediately() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 50 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(50); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(4, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // set lostBookieRecoveryDelay to 0, so that it triggers AuditTask immediately + urLedgerMgr.setLostBookieRecoveryDelay(0); + + // wait for 1 second for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + @Test + public void testRescheduleOfDelayedAuditOfLostBookiesToStartLater() throws Exception { + // wait for a second so that the initial periodic check 
finishes + Thread.sleep(1000); + + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 3 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(3); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // set lostBookieRecoveryDelay to 4, so the pending AuditTask is resheduled + urLedgerMgr.setLostBookieRecoveryDelay(4); + + // since we changed the BookieRecoveryDelay period to 4, the audittask shouldn't have been executed + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for 3 seconds (since we already waited for 2 secs) for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(3, TimeUnit.SECONDS)); + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + @Test + public void testTriggerAuditorWithNoPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + int lostBookieRecoveryDelayConfValue = baseConf.getLostBookieRecoveryDelay(); + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + Future auditTask = auditorBookiesAuditor.getAuditTask(); + int lostBookieRecoveryDelayBeforeChange = auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay", + lostBookieRecoveryDelayConfValue, lostBookieRecoveryDelayBeforeChange); + + @Cleanup("shutdown") OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + @Cleanup MetadataClientDriver driver = + MetadataDrivers.getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.of(zkc)); + + // there is no easy way to validate if the Auditor has executed Audit process (Auditor.startAudit), + // without shuttingdown 
Bookie. To test that resetting LostBookieRecoveryDelay triggers auditing + // even when there is no pending AuditTask, the following approach is needed. + + // Here we create the metadata of a few ledgers with non-existing bookies as their ensembles. + // When the Auditor runs its audit, it recognizes these ledgers as under-replicated and marks them as + // such, since these bookies are not available. + int numofledgers = 5; + Random rand = new Random(); + for (int i = 0; i < numofledgers; i++) { + ArrayList ensemble = new ArrayList(); + ensemble.add(new BookieSocketAddress("99.99.99.99:9999").toBookieId()); + ensemble.add(new BookieSocketAddress("11.11.11.11:1111").toBookieId()); + ensemble.add(new BookieSocketAddress("88.88.88.88:8888").toBookieId()); + + long ledgerId = (Math.abs(rand.nextLong())) % 100000000; + + LedgerMetadata metadata = LedgerMetadataBuilder.create() + .withId(ledgerId) + .withEnsembleSize(3).withWriteQuorumSize(2).withAckQuorumSize(2) + .withPassword("passwd".getBytes()) + .withDigestType(DigestType.CRC32.toApiDigestType()) + .newEnsembleEntry(0L, ensemble).build(); + + try (LedgerManager lm = driver.getLedgerManagerFactory().newLedgerManager()) { + lm.createLedgerMetadata(ledgerId, metadata).get(2000, TimeUnit.MILLISECONDS); + } + ledgerList.add(ledgerId); + } + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelayBeforeChange); + assertTrue("Audit should be triggered and created ledgers should be marked as underreplicated", + underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("All the ledgers should be marked as underreplicated", ledgerList.size(), urLedgerList.size()); + + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay", + lostBookieRecoveryDelayBeforeChange, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + @Test + public void testTriggerAuditorWithPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int lostBookieRecoveryDelay = 5; + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + Future auditTask = auditorBookiesAuditor.getAuditTask(); + assertNotSame("auditTask is not supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to what we set", 
lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + + // set lostBookieRecoveryDelay to 5 (previous value), so that Auditor is triggered immediately + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + assertTrue("audit of lost bookie shouldn't be delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("all under replicated ledgers should be identified", ledgerList.size(), + urLedgerList.size()); + + Thread.sleep(100); + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to previously set value", + lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + @Test + public void testTriggerAuditorBySettingDelayToZeroWithPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int lostBookieRecoveryDelay = 5; + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + Future auditTask = auditorBookiesAuditor.getAuditTask(); + assertNotSame("auditTask is not supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to what we set", + lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + + // set lostBookieRecoveryDelay to 0, so that Auditor is triggered immediately + urLedgerMgr.setLostBookieRecoveryDelay(0); + assertTrue("audit of lost bookie shouldn't be delayed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + assertEquals("all under replicated ledgers should be identified", ledgerList.size(), + urLedgerList.size()); + + Thread.sleep(100); + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to previously set value", + 0, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + /** + * Test audit of bookies is delayed when one bookie is down. But when + * another one goes down, the audit is started immediately. 
+ */ + @Test + public void testDelayedAuditWithMultipleBookieFailures() throws Exception { + // wait for the periodic bookie check to finish + Thread.sleep(1000); + + // create a ledger with a bunch of entries + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + + // wait for 10 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(10); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference<String> shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutDownNonAuditorBookie(); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // wait for 3 seconds and there shouldn't be any under replicated ledgers + // because we have delayed the start of audit by 10 seconds + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(3, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // Now shutdown the second non auditor bookie; We want to make sure that + // the history about having delayed recovery remains. Hence we make sure + // we bring down a non auditor bookie. This should cause the audit to take + // place immediately and not wait for the remaining 7 seconds to elapse + AtomicReference<String> shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // 2 second grace period for the ledgers to get reported as under replicated + Thread.sleep(2000); + + // If the following checks pass, it means that audit happened + // within 2 seconds of second bookie going down and it didn't + // wait for 7 more seconds. Hence the second bookie failure doesn't + // delay the audit + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map<Long, String> urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + shutdownBookieRef2.get() + + " are not listed in the ledger as missing replicas :" + data, + data.contains(shutdownBookieRef1.get()) && data.contains(shutdownBookieRef2.get())); + } + + /** + * Test that the audit of bookies is delayed during a rolling upgrade scenario: + * a bookie goes down and comes back up, then the next bookie goes down and comes up, and so on. + * At any time only one bookie is down.
+ */ + @Test + public void testDelayedAuditWithRollingUpgrade() throws Exception { + // wait for the periodic bookie check to finish + Thread.sleep(1000); + + // create a ledger with a bunch of entries + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(5); + + // shutdown a non auditor bookie to avoid an election + int idx1 = getShutDownNonAuditorBookieIdx(""); + ServerConfiguration conf1 = confByIndex(idx1); + AtomicReference shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutdownBookie(idx1); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // wait for 2 seconds and there shouldn't be any under replicated ledgers + // because we have delayed the start of audit by 5 seconds + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // restart the bookie we shut down above + startAndAddBookie(conf1); + + // Now to simulate the rolling upgrade, bring down a bookie different from + // the one we brought down/up above. + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // since the first bookie that was brought down/up has come up, there is only + // one bookie down at this time. 
Hence the lost bookie check shouldn't start + // immediately; it will start 5 seconds after the second bookie went down + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for a total of 6 seconds(2+4) for the ledgers to get reported as under replicated + Thread.sleep(4000); + + // If the following checks pass, it means that auditing happened + // after lostBookieRecoveryDelay during rolling upgrade as expected + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + "wrongly listed as missing the ledger: " + data, + !data.contains(shutdownBookieRef1.get())); + assertTrue("Bookie " + shutdownBookieRef2.get() + + " is not listed in the ledger as missing replicas :" + data, + data.contains(shutdownBookieRef2.get())); + LOG.info("*****************Test Complete"); + } + + private void waitForAuditToComplete() throws Exception { + long endTime = System.currentTimeMillis() + 5_000; + while (System.currentTimeMillis() < endTime) { + Auditor auditor = getAuditorBookiesAuditor(); + if (auditor != null) { + Future task = auditor.submitAuditTask(); + task.get(5, TimeUnit.SECONDS); + return; + } + Thread.sleep(100); + } + throw new TimeoutException("Could not find an audit within 5 seconds"); + } + + /** + * Wait for ledger to be underreplicated, and to be missing all replicas specified. + */ + private boolean waitForLedgerMissingReplicas(Long ledgerId, long secondsToWait, String... replicas) + throws Exception { + for (int i = 0; i < secondsToWait; i++) { + try { + UnderreplicatedLedger data = urLedgerMgr.getLedgerUnreplicationInfo(ledgerId); + boolean all = true; + for (String r : replicas) { + all = all && data.getReplicaList().contains(r); + } + if (all) { + return true; + } + } catch (Exception e) { + // may not find node + } + Thread.sleep(1000); + } + return false; + } + + private CountDownLatch registerUrLedgerWatcher(int count) + throws KeeperException, InterruptedException { + final CountDownLatch underReplicaLatch = new CountDownLatch(count); + for (Long ledgerId : ledgerList) { + Watcher urLedgerWatcher = new ChildWatcher(underReplicaLatch); + String znode = ZkLedgerUnderreplicationManager.getUrLedgerZnode(underreplicatedPath, + ledgerId); + zkc.exists(znode, urLedgerWatcher); + } + return underReplicaLatch; + } + + private void doLedgerRereplication(Long... 
ledgerIds) + throws UnavailableException { + for (int i = 0; i < ledgerIds.length; i++) { + long lid = urLedgerMgr.getLedgerToRereplicate(); + assertTrue("Received unexpected ledgerid", Arrays.asList(ledgerIds).contains(lid)); + urLedgerMgr.markLedgerReplicated(lid); + urLedgerMgr.releaseUnderreplicatedLedger(lid); + } + } + + private String shutdownBookie(int bkShutdownIndex) throws Exception { + BookieServer bkServer = serverByIndex(bkShutdownIndex); + String bookieAddr = bkServer.getBookieId().toString(); + if (LOG.isInfoEnabled()) { + LOG.info("Shutting down bookie:" + bookieAddr); + } + killBookie(bkShutdownIndex); + auditorElectors.get(bookieAddr).shutdown(); + auditorElectors.remove(bookieAddr); + return bookieAddr; + } + + private LedgerHandle createAndAddEntriesToLedger() throws BKException, + InterruptedException { + int numEntriesToWrite = 100; + // Create a ledger + LedgerHandle lh = bkc.createLedger(digestType, ledgerPassword); + LOG.info("Ledger ID: " + lh.getId()); + addEntry(numEntriesToWrite, lh); + return lh; + } + + private void addEntry(int numEntriesToWrite, LedgerHandle lh) + throws InterruptedException, BKException { + final CountDownLatch completeLatch = new CountDownLatch(numEntriesToWrite); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + lh.asyncAddEntry(entry.array(), new AddCallback() { + public void addComplete(int rc2, LedgerHandle lh, long entryId, Object ctx) { + rc.compareAndSet(BKException.Code.OK, rc2); + completeLatch.countDown(); + } + }, null); + } + completeLatch.await(); + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); + } + + } + + private Map getUrLedgerData(Set urLedgerList) + throws KeeperException, InterruptedException { + Map urLedgerData = new HashMap(); + for (Long ledgerId : urLedgerList) { + String znode = ZkLedgerUnderreplicationManager.getUrLedgerZnode(underreplicatedPath, + ledgerId); + byte[] data = zkc.getData(znode, false, null); + urLedgerData.put(ledgerId, new String(data)); + } + return urLedgerData; + } + + private class ChildWatcher implements Watcher { + private final CountDownLatch underReplicaLatch; + + public ChildWatcher(CountDownLatch underReplicaLatch) { + this.underReplicaLatch = underReplicaLatch; + } + + @Override + public void process(WatchedEvent event) { + LOG.info("Received notification for the ledger path : " + + event.getPath()); + for (Long ledgerId : ledgerList) { + if (event.getPath().contains(ledgerId + "")) { + urLedgerList.add(ledgerId); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Count down and waiting for next notification"); + } + // count down and waiting for next notification + underReplicaLatch.countDown(); + } + } + + private BookieServer getAuditorBookie() throws Exception { + List auditors = new LinkedList(); + byte[] data = zkc.getData(electionPath, false, null); + assertNotNull("Auditor election failed", data); + for (int i = 0; i < bookieCount(); i++) { + BookieId bookieId = addressByIndex(i); + if (new String(data).contains(bookieId + "")) { + auditors.add(serverByIndex(i)); + } + } + assertEquals("Multiple Bookies acting as Auditor!", 1, auditors + .size()); + return auditors.get(0); + } + + private Auditor getAuditorBookiesAuditor() throws Exception { + BookieServer auditorBookieServer = getAuditorBookie(); + String bookieAddr = auditorBookieServer.getBookieId().toString(); + 
return auditorElectors.get(bookieAddr).auditor; + } + + private String shutDownNonAuditorBookie() throws Exception { + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(getAuditorBookie()); + int bkIndexDownBookie; + if (indexOf < lastBookieIndex()) { + bkIndexDownBookie = indexOf + 1; + } else { + bkIndexDownBookie = indexOf - 1; + } + return shutdownBookie(bkIndexDownBookie); + } + + private int getShutDownNonAuditorBookieIdx(String exclude) throws Exception { + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(getAuditorBookie()); + int bkIndexDownBookie = 0; + for (int i = 0; i <= lastBookieIndex(); i++) { + if (i == indexOf || addressByIndex(i).toString().equals(exclude)) { + continue; + } + bkIndexDownBookie = i; + break; + } + return bkIndexDownBookie; + } + + private String shutDownNonAuditorBookie(String exclude) throws Exception { + return shutdownBookie(getShutDownNonAuditorBookieIdx(exclude)); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java new file mode 100644 index 0000000000000..9e8c5a54a5d91 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithLedgerManagerFactory; +import static org.testng.AssertJUnit.assertEquals; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.UncheckedExecutionException; +import lombok.Cleanup; +import org.apache.bookkeeper.client.ClientUtil; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.conf.TestBKConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies that the period check on the auditor + * will pick up on missing data in the client. 
+ */ +public class AuditorPeriodicBookieCheckTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPeriodicBookieCheckTest.class); + + private AuditorElector auditorElector = null; + + private static final int CHECK_INTERVAL = 1; // run every second + + public AuditorPeriodicBookieCheckTest() throws Exception { + super(3); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + + ServerConfiguration conf = TestBKConfiguration.newServerConfiguration(); + conf.setAuditorPeriodicBookieCheckInterval(CHECK_INTERVAL); + + conf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + conf.setProperty("clientConnectTimeoutMillis", 500); + String addr = addressByIndex(0).toString(); + + auditorElector = new AuditorElector(addr, conf); + auditorElector.start(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + auditorElector.shutdown(); + super.tearDown(); + } + + /** + * Test that the periodic bookie checker works. + */ + @Test + public void testPeriodicBookieCheckInterval() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + runFunctionWithLedgerManagerFactory(confByIndex(0), mFactory -> { + try (LedgerManager ledgerManager = mFactory.newLedgerManager()) { + @Cleanup final LedgerUnderreplicationManager underReplicationManager = + mFactory.newLedgerUnderreplicationManager(); + long ledgerId = 12345L; + ClientUtil.setupLedger(bkc.getLedgerManager(), ledgerId, + LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList( + new BookieSocketAddress("192.0.2.1", 1000).toBookieId(), + getBookie(0), + getBookie(1)))); + long underReplicatedLedger = -1; + for (int i = 0; i < 10; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerId, underReplicatedLedger); + } catch (Exception e) { + throw new UncheckedExecutionException(e.getMessage(), e); + } + return null; + }); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java new file mode 100644 index 0000000000000..9c5805dc536d6 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java @@ -0,0 +1,1070 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import io.netty.buffer.ByteBuf; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.bookkeeper.bookie.Bookie; +import org.apache.bookkeeper.bookie.BookieAccessor; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.bookie.IndexPersistenceMgr; +import org.apache.bookkeeper.bookie.TestBookieImpl; +import org.apache.bookkeeper.client.AsyncCallback.AddCallback; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Counter; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies that the period check on the auditor + * will pick up on missing data in the client. 
+ */ +public class AuditorPeriodicCheckTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPeriodicCheckTest.class); + + private MetadataBookieDriver driver; + private HashMap auditorElectors = new HashMap(); + + private static final int CHECK_INTERVAL = 1; // run every second + + public AuditorPeriodicCheckTest() throws Exception { + super(3); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + + for (int i = 0; i < numBookies; i++) { + ServerConfiguration conf = new ServerConfiguration(confByIndex(i)); + conf.setAuditorPeriodicCheckInterval(CHECK_INTERVAL); + conf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + + String addr = addressByIndex(i).toString(); + + AuditorElector auditorElector = new AuditorElector(addr, conf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != driver) { + driver.close(); + } + + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + super.tearDown(); + } + + /** + * test that the periodic checking will detect corruptions in + * the bookie entry log. 
+ */ + @Test + public void testEntryLogCorruption() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + underReplicationManager.disableLedgerReplication(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + long ledgerId = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + BookieAccessor.forceFlush((BookieImpl) serverByIndex(0).getBookie()); + + + File ledgerDir = confByIndex(0).getLedgerDirs()[0]; + ledgerDir = BookieImpl.getCurrentDirectory(ledgerDir); + // corrupt of entryLogs + File[] entryLogs = ledgerDir.listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.endsWith(".log"); + } + }); + ByteBuffer junk = ByteBuffer.allocate(1024 * 1024); + for (File f : entryLogs) { + FileOutputStream out = new FileOutputStream(f); + out.getChannel().write(junk); + out.close(); + } + restartBookies(); // restart to clear read buffers + + underReplicationManager.enableLedgerReplication(); + long underReplicatedLedger = -1; + for (int i = 0; i < 10; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerId, underReplicatedLedger); + underReplicationManager.close(); + } + + /** + * test that the period checker will detect corruptions in + * the bookie index files. + */ + @Test + public void testIndexCorruption() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + long ledgerToCorrupt = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + // push ledgerToCorrupt out of page cache (bookie is configured to only use 1 page) + lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + BookieAccessor.forceFlush((BookieImpl) serverByIndex(0).getBookie()); + + File ledgerDir = confByIndex(0).getLedgerDirs()[0]; + ledgerDir = BookieImpl.getCurrentDirectory(ledgerDir); + + // corrupt of entryLogs + File index = new File(ledgerDir, IndexPersistenceMgr.getLedgerName(ledgerToCorrupt)); + LOG.info("file to corrupt{}", index); + ByteBuffer junk = ByteBuffer.allocate(1024 * 1024); + FileOutputStream out = new FileOutputStream(index); + out.getChannel().write(junk); + out.close(); + + long underReplicatedLedger = -1; + for (int i = 0; i < 15; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerToCorrupt, underReplicatedLedger); + underReplicationManager.close(); + } + + /** + * Test that the period checker will not run when auto replication has been disabled. 
+ */ + @Test + public void testPeriodicCheckWhenDisabled() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + final LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + final int numLedgers = 10; + final int numMsgs = 2; + final CountDownLatch completeLatch = new CountDownLatch(numMsgs * numLedgers); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + + List lhs = new ArrayList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + lhs.add(lh); + for (int j = 0; j < 2; j++) { + lh.asyncAddEntry("testdata".getBytes(), new AddCallback() { + public void addComplete(int rc2, LedgerHandle lh, long entryId, Object ctx) { + if (rc.compareAndSet(BKException.Code.OK, rc2)) { + LOG.info("Failed to add entry : {}", BKException.getMessage(rc2)); + } + completeLatch.countDown(); + } + }, null); + } + } + completeLatch.await(); + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); + } + + for (LedgerHandle lh : lhs) { + lh.close(); + } + + underReplicationManager.disableLedgerReplication(); + + final AtomicInteger numReads = new AtomicInteger(0); + ServerConfiguration conf = killBookie(0); + + Bookie deadBookie = new TestBookieImpl(conf) { + @Override + public ByteBuf readEntry(long ledgerId, long entryId) + throws IOException, NoLedgerException { + // we want to disable during checking + numReads.incrementAndGet(); + throw new IOException("Fake I/O exception"); + } + }; + startAndAddBookie(conf, deadBookie); + + Thread.sleep(CHECK_INTERVAL * 2000); + assertEquals("Nothing should have tried to read", 0, numReads.get()); + underReplicationManager.enableLedgerReplication(); + Thread.sleep(CHECK_INTERVAL * 2000); // give it time to run + + underReplicationManager.disableLedgerReplication(); + // give it time to stop, from this point nothing new should be marked + Thread.sleep(CHECK_INTERVAL * 2000); + + int numUnderreplicated = 0; + long underReplicatedLedger = -1; + do { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger == -1) { + break; + } + numUnderreplicated++; + + underReplicationManager.markLedgerReplicated(underReplicatedLedger); + } while (underReplicatedLedger != -1); + + Thread.sleep(CHECK_INTERVAL * 2000); // give a chance to run again (it shouldn't, it's disabled) + + // ensure that nothing is marked as underreplicated + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + assertEquals("There should be no underreplicated ledgers", -1, underReplicatedLedger); + + LOG.info("{} of {} ledgers underreplicated", numUnderreplicated, numUnderreplicated); + assertTrue("All should be underreplicated", + numUnderreplicated <= numLedgers && numUnderreplicated > 0); + } + + /** + * Test that the period check will succeed if a ledger is deleted midway. 
+ */ + @Test + public void testPeriodicCheckWhenLedgerDeleted() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + try (final Auditor auditor = new Auditor( + BookieImpl.getBookieId(confByIndex(0)).toString(), + confByIndex(0), NullStatsLogger.INSTANCE)) { + final AtomicBoolean exceptionCaught = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + Thread t = new Thread() { + public void run() { + try { + latch.countDown(); + for (int i = 0; i < numLedgers; i++) { + ((AuditorCheckAllLedgersTask) auditor.auditorCheckAllLedgersTask).checkAllLedgers(); + } + } catch (Exception e) { + LOG.error("Caught exception while checking all ledgers", e); + exceptionCaught.set(true); + } + } + }; + t.start(); + latch.await(); + for (Long id : ids) { + bkc.deleteLedger(id); + } + t.join(); + assertFalse("Shouldn't have thrown exception", exceptionCaught.get()); + } + } + + @Test + public void testGetLedgerFromZookeeperThrottled() throws Exception { + final int numberLedgers = 30; + + // write ledgers into bookkeeper cluster + try { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + for (int i = 0; i < numberLedgers; ++i) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int j = 0; j < 5; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + } catch (InterruptedException | BKException e) { + LOG.error("Failed to shutdown auditor elector or write data to ledgers ", e); + fail(); + } + + // create auditor and call `checkAllLedgers` + ServerConfiguration configuration = confByIndex(0); + configuration.setAuditorMaxNumberOfConcurrentOpenLedgerOperations(10); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numLedgersChecked = statsLogger + .getCounter(ReplicationStats.NUM_LEDGERS_CHECKED); + Auditor auditor = new Auditor(BookieImpl.getBookieId(configuration).toString(), + configuration, statsLogger); + + try { + ((AuditorCheckAllLedgersTask) auditor.auditorCheckAllLedgersTask).checkAllLedgers(); + assertEquals("NUM_LEDGERS_CHECKED", numberLedgers, (long) numLedgersChecked.get()); + } catch (Exception e) { + LOG.error("Caught exception while checking all ledgers ", e); + fail(); + } + } + + @Test + public void testInitialDelayOfCheckAllLedgers() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfCheckAllLedgers(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfCheckAllLedgers(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfCheckAllLedgers(urm, 1001, 1000, servConf, 
bkc); + } + + void validateInitialDelayOfCheckAllLedgers(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicCheckInterval, ServerConfiguration servConf, + BookKeeper bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger checkAllLedgersStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.CHECK_ALL_LEDGERS_TIME); + servConf.setAuditorPeriodicCheckInterval(auditorPeriodicCheckInterval); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("CHECK_ALL_LEDGERS_TIME SuccessCount", 0, checkAllLedgersStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long checkAllLedgersCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedCheckAllLedgersExecutionTime = -1; + long bufferTimeInMsecs = 12000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting checkAllLedgersCTime to -1, it means that + * checkAllLedgers hasn't run before. So initialDelay for + * checkAllLedgers should be 0. + */ + checkAllLedgersCTime = -1; + initialDelayInMsecs = 0; + } else { + checkAllLedgersCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicCheckInterval ? 0 + : (auditorPeriodicCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next checkAllLedgers should happen atleast after + * nextExpectedCheckAllLedgersExecutionTime. + */ + nextExpectedCheckAllLedgersExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setCheckAllLedgersCTime(checkAllLedgersCTime); + auditor.start(); + /* + * since auditorPeriodicCheckInterval are higher values (in the order of + * 100s of seconds), its ok bufferTimeInMsecs to be ` 10 secs. 
+ */ + assertTrue("checkAllLedgers should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 10; i++) { + Thread.sleep(100); + if (checkAllLedgersStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("CHECK_ALL_LEDGERS_TIME SuccessCount", 1, checkAllLedgersStatsLogger.getSuccessCount()); + long currentCheckAllLedgersCTime = urm.getCheckAllLedgersCTime(); + assertTrue( + "currentCheckAllLedgersCTime: " + currentCheckAllLedgersCTime + + " should be greater than nextExpectedCheckAllLedgersExecutionTime: " + + nextExpectedCheckAllLedgersExecutionTime, + currentCheckAllLedgersCTime > nextExpectedCheckAllLedgersExecutionTime); + assertTrue( + "currentCheckAllLedgersCTime: " + currentCheckAllLedgersCTime + + " should be lesser than nextExpectedCheckAllLedgersExecutionTime+bufferTimeInMsecs: " + + (nextExpectedCheckAllLedgersExecutionTime + bufferTimeInMsecs), + currentCheckAllLedgersCTime < (nextExpectedCheckAllLedgersExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testInitialDelayOfPlacementPolicyCheck() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfPlacementPolicyCheck(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfPlacementPolicyCheck(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfPlacementPolicyCheck(urm, 1001, 1000, servConf, bkc); + } + + void validateInitialDelayOfPlacementPolicyCheck(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicPlacementPolicyCheckInterval, + ServerConfiguration servConf, BookKeeper bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger placementPolicyCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(auditorPeriodicPlacementPolicyCheckInterval); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, placementPolicyCheckStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long placementPolicyCheckCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedPlacementPolicyCheckExecutionTime = -1; + long bufferTimeInMsecs = 20000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting placementPolicyCheckCTime to -1, it means that + * placementPolicyCheck hasn't run before. 
So initialDelay for + * placementPolicyCheck should be 0. + */ + placementPolicyCheckCTime = -1; + initialDelayInMsecs = 0; + } else { + placementPolicyCheckCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicPlacementPolicyCheckInterval ? 0 + : (auditorPeriodicPlacementPolicyCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next placementPolicyCheck should happen atleast after + * nextExpectedPlacementPolicyCheckExecutionTime. + */ + nextExpectedPlacementPolicyCheckExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setPlacementPolicyCheckCTime(placementPolicyCheckCTime); + auditor.start(); + /* + * since auditorPeriodicPlacementPolicyCheckInterval are higher values (in the + * order of 100s of seconds), its ok bufferTimeInMsecs to be ` 20 secs. + */ + assertTrue("placementPolicyCheck should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, placementPolicyCheckStatsLogger.getSuccessCount()); + long currentPlacementPolicyCheckCTime = urm.getPlacementPolicyCheckCTime(); + assertTrue( + "currentPlacementPolicyCheckCTime: " + currentPlacementPolicyCheckCTime + + " should be greater than nextExpectedPlacementPolicyCheckExecutionTime: " + + nextExpectedPlacementPolicyCheckExecutionTime, + currentPlacementPolicyCheckCTime > nextExpectedPlacementPolicyCheckExecutionTime); + assertTrue( + "currentPlacementPolicyCheckCTime: " + currentPlacementPolicyCheckCTime + + " should be lesser than nextExpectedPlacementPolicyCheckExecutionTime+bufferTimeInMsecs: " + + (nextExpectedPlacementPolicyCheckExecutionTime + bufferTimeInMsecs), + currentPlacementPolicyCheckCTime < (nextExpectedPlacementPolicyCheckExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testInitialDelayOfReplicasCheck() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + LedgerHandle lh = bkc.createLedger(3, 2, DigestType.CRC32, "passwd".getBytes()); + for (int j = 0; j < 5; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + long ledgerId = 100000L; + lh = bkc.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null); + lh.close(); + + ledgerId = 100001234L; + lh = bkc.createLedgerAdv(ledgerId, 3, 3, 2, DigestType.CRC32, "passwd".getBytes(), null); + for (int j = 0; j < 4; j++) { + lh.addEntry(j, "testdata".getBytes()); + } + lh.close(); + + ledgerId = 991234L; + lh = bkc.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null); + lh.addEntry(0, "testdata".getBytes()); + lh.close(); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfReplicasCheck(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfReplicasCheck(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfReplicasCheck(urm, 1001, 1000, servConf, bkc); + } + + void validateInitialDelayOfReplicasCheck(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicReplicasCheckInterval, ServerConfiguration servConf, + BookKeeper 
bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger replicasCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + servConf.setAuditorPeriodicReplicasCheckInterval(auditorPeriodicReplicasCheckInterval); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 0, replicasCheckStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long replicasCheckCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedReplicasCheckExecutionTime = -1; + long bufferTimeInMsecs = 20000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting replicasCheckCTime to -1, it means that + * replicasCheck hasn't run before. So initialDelay for + * replicasCheck should be 0. + */ + replicasCheckCTime = -1; + initialDelayInMsecs = 0; + } else { + replicasCheckCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicReplicasCheckInterval ? 0 + : (auditorPeriodicReplicasCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next replicasCheck should happen atleast after + * nextExpectedReplicasCheckExecutionTime. + */ + nextExpectedReplicasCheckExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setReplicasCheckCTime(replicasCheckCTime); + auditor.start(); + /* + * since auditorPeriodicReplicasCheckInterval are higher values (in the + * order of 100s of seconds), its ok bufferTimeInMsecs to be ` 20 secs. 
+ */ + assertTrue("replicasCheck should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (replicasCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 1, replicasCheckStatsLogger.getSuccessCount()); + long currentReplicasCheckCTime = urm.getReplicasCheckCTime(); + assertTrue( + "currentReplicasCheckCTime: " + currentReplicasCheckCTime + + " should be greater than nextExpectedReplicasCheckExecutionTime: " + + nextExpectedReplicasCheckExecutionTime, + currentReplicasCheckCTime > nextExpectedReplicasCheckExecutionTime); + assertTrue( + "currentReplicasCheckCTime: " + currentReplicasCheckCTime + + " should be lesser than nextExpectedReplicasCheckExecutionTime+bufferTimeInMsecs: " + + (nextExpectedReplicasCheckExecutionTime + bufferTimeInMsecs), + currentReplicasCheckCTime < (nextExpectedReplicasCheckExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfCheckAllLedgers() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger underReplicatedLedgerTotalSizeStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.UNDER_REPLICATED_LEDGERS_TOTAL_SIZE); + + servConf.setAuditorPeriodicCheckInterval(1); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + + canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong num is numLedgers, right num is 0 + assertEquals("UNDER_REPLICATED_LEDGERS_TOTAL_SIZE", + 0, + underReplicatedLedgerTotalSizeStatsLogger.getSuccessCount()); + + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfPlacementPolicy() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + 
LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger placementPolicyCheckTime = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 0, placementPolicyCheckTime.getSuccessCount()); + + canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong successCount is > 0, right successCount is = 0 + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 0, placementPolicyCheckTime.getSuccessCount()); + + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfReplicasCheck() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger replicasCheckTime = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + servConf.setAuditorPeriodicReplicasCheckInterval(1); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final 
CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + assertEquals("REPLICAS_CHECK_TIME", 0, replicasCheckTime.getSuccessCount()); + + canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong successCount is > 0, right successCount is = 0 + assertEquals("REPLICAS_CHECK_TIME", 0, replicasCheckTime.getSuccessCount()); + + auditor.close(); + } + + static class TestAuditor extends Auditor { + + final AtomicReference<CountDownLatch> latchRef = new AtomicReference<>(new CountDownLatch(1)); + + public TestAuditor(String bookieIdentifier, ServerConfiguration conf, BookKeeper bkc, boolean ownBkc, + StatsLogger statsLogger, AtomicBoolean exceptedRun) throws UnavailableException { + super(bookieIdentifier, conf, bkc, ownBkc, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + public TestAuditor(String bookieIdentifier, ServerConfiguration conf, BookKeeper bkc, boolean ownBkc, + BookKeeperAdmin bkadmin, boolean ownadmin, StatsLogger statsLogger, + AtomicBoolean exceptedRun) throws UnavailableException { + super(bookieIdentifier, conf, bkc, ownBkc, bkadmin, ownadmin, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + public TestAuditor(final String bookieIdentifier, ServerConfiguration conf, StatsLogger statsLogger, + AtomicBoolean exceptedRun) + throws UnavailableException { + super(bookieIdentifier, conf, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + private void renewAuditorTestWrapperTask(AtomicBoolean exceptedRun) { + super.auditorCheckAllLedgersTask = + new AuditorTestWrapperTask(super.auditorCheckAllLedgersTask, latchRef, exceptedRun); + super.auditorPlacementPolicyCheckTask = + new AuditorTestWrapperTask(super.auditorPlacementPolicyCheckTask, latchRef, exceptedRun); + super.auditorReplicasCheckTask = + new AuditorTestWrapperTask(super.auditorReplicasCheckTask, latchRef, exceptedRun); + } + + CountDownLatch getLatch() { + return latchRef.get(); + } + + void setLatch(CountDownLatch latch) { + latchRef.set(latch); + } + + private static class AuditorTestWrapperTask extends AuditorTask { + private final AuditorTask innerTask; + private final AtomicReference<CountDownLatch> latchRef; + private final AtomicBoolean exceptedRun; + + AuditorTestWrapperTask(AuditorTask innerTask, + AtomicReference<CountDownLatch> latchRef, + AtomicBoolean exceptedRun) { + super(null, null, null, null, null, + null, null); + this.innerTask = innerTask; + this.latchRef = latchRef; + this.exceptedRun = exceptedRun; + } + + @Override + protected void runTask() { + if (exceptedRun == null || exceptedRun.get()) { + innerTask.runTask(); + latchRef.get().countDown(); + } + } + + @Override + public void shutdown() { + innerTask.shutdown(); + } + } + } + + private BookieId replaceBookieWithWriteFailingBookie(LedgerHandle lh) throws Exception { + int bookieIdx = -1; + Long entryId = lh.getLedgerMetadata().getAllEnsembles().firstKey(); + List<BookieId> curEnsemble = lh.getLedgerMetadata().getAllEnsembles().get(entryId); + + // Identify a bookie in the current ledger ensemble to be replaced + BookieId replacedBookie = null; + for (int i = 0; i < numBookies; i++) { + if (curEnsemble.contains(addressByIndex(i))) { + bookieIdx = i; + replacedBookie = addressByIndex(i); +
break; + } + } + assertNotSame("Couldn't find ensemble bookie in bookie list", -1, bookieIdx); + + LOG.info("Killing bookie " + addressByIndex(bookieIdx)); + ServerConfiguration conf = killBookie(bookieIdx); + Bookie writeFailingBookie = new TestBookieImpl(conf) { + @Override + public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, + Object ctx, byte[] masterKey) + throws IOException, BookieException { + try { + LOG.info("Failing write to entry "); + // sleep a bit so that writes to other bookies succeed before + // the client hears about the failure on this bookie. If the + // client gets ack-quorum number of acks first, it won't care + // about any failures and won't reform the ensemble. + Thread.sleep(100); + throw new IOException(); + } catch (InterruptedException ie) { + // ignore, only interrupted if shutting down, + // and an exception would spam the logs + Thread.currentThread().interrupt(); + } + } + }; + startAndAddBookie(conf, writeFailingBookie); + return replacedBookie; + } + + /* + * Validates that the periodic ledger check will fix entries with a failed write. + */ + @Test + public void testFailedWriteRecovery() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + underReplicationManager.disableLedgerReplication(); + + LedgerHandle lh = bkc.createLedger(2, 2, 1, DigestType.CRC32, "passwd".getBytes()); + + // kill one of the bookies and replace it with one that rejects write; + // This way we get into the under replication state + BookieId replacedBookie = replaceBookieWithWriteFailingBookie(lh); + + // Write a few entries; this should cause under replication + byte[] data = "foobar".getBytes(); + data = "foobar".getBytes(); + lh.addEntry(data); + lh.addEntry(data); + lh.addEntry(data); + + lh.close(); + + // enable under replication detection and wait for it to report + // under replicated ledger + underReplicationManager.enableLedgerReplication(); + long underReplicatedLedger = -1; + for (int i = 0; i < 5; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", lh.getId(), underReplicatedLedger); + + // now start the replication workers + List<ReplicationWorker> l = new ArrayList<>(); + for (int i = 0; i < numBookies; i++) { + ReplicationWorker rw = new ReplicationWorker(confByIndex(i), NullStatsLogger.INSTANCE); + rw.start(); + l.add(rw); + } + underReplicationManager.close(); + + // Wait for ensemble to change after replication + Thread.sleep(3000); + for (ReplicationWorker rw : l) { + rw.shutdown(); + } + + // check that ensemble has changed and the bookie that rejected writes has + // been replaced in the ensemble + LedgerHandle newLh = bkc.openLedger(lh.getId(), DigestType.CRC32, "passwd".getBytes()); + for (Map.Entry<Long, ? extends List<BookieId>> e : + newLh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List<BookieId> ensemble = e.getValue(); + assertFalse("Ensemble hasn't been updated", ensemble.contains(replacedBookie)); + } + newLh.close(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java new file mode 100644 index 0000000000000..8b9c0b143028a --- /dev/null +++ 
b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorPlacementPolicyCheckTask}. 
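+ * The test creates a handful of closed ledgers, invokes the task's runTask()
+ * directly (rather than going through a full Auditor), and then verifies the
+ * PLACEMENT_POLICY_CHECK_TIME success count together with the closed-ledger
+ * and not-adhering-ledger counters exposed by the task.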
+ */ +public class AuditorPlacementPolicyCheckTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPlacementPolicyCheckTaskTest.class); + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorPlacementPolicyCheckTaskTest() throws Exception { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseClientConf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + LedgerManagerFactory ledgerManagerFactory = bookKeeper.getLedgerManagerFactory(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + super.tearDown(); + } + + @Test + public void testPlacementPolicyCheck() throws BKException, InterruptedException { + + // 1. create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + // 2. init auditorPlacementPolicyCheckTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + + AuditorPlacementPolicyCheckTask auditorPlacementPolicyCheckTask = new AuditorPlacementPolicyCheckTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. placementPolicyCheck + auditorPlacementPolicyCheckTask.runTask(); + + // 4. 
verify + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) + statsLogger.getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME)).getSuccessCount()); + assertEquals("numOfClosedLedgersAuditedInPlacementPolicyCheck", + numLedgers, + auditorPlacementPolicyCheckTask.getNumOfClosedLedgersAuditedInPlacementPolicyCheck().get()); + assertEquals("numOfLedgersFoundNotAdheringInPlacementPolicyCheck", + numLedgers, + auditorPlacementPolicyCheckTask.getNumOfLedgersFoundNotAdheringInPlacementPolicyCheck().get()); + } + +} \ No newline at end of file diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java new file mode 100644 index 0000000000000..5637819a9275b --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java @@ -0,0 +1,860 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicyImpl.REPP_DNS_RESOLVER_CLASS; +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.ZoneawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.api.DigestType; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.BookieServiceInfo; +import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.replication.AuditorPeriodicCheckTest.TestAuditor; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.zookeeper.KeeperException; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests the logic of Auditor's PlacementPolicyCheck. 
+ */ +public class AuditorPlacementPolicyCheckTest extends BookKeeperClusterTestCase { + private MetadataBookieDriver driver; + + public AuditorPlacementPolicyCheckTest() throws Exception { + super(1); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + StaticDNSResolver.reset(); + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != driver) { + driver.close(); + } + super.tearDown(); + } + + @Test + public void testPlacementPolicyCheckWithBookiesFromDifferentRacks() throws Exception { + int numOfBookies = 5; + List bookieAddresses = new ArrayList<>(); + BookieSocketAddress bookieAddress; + RegistrationManager regManager = driver.createRegistrationManager(); + // all the numOfBookies (5) are going to be in different racks + for (int i = 0; i < numOfBookies; i++) { + bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181); + StaticDNSResolver.addNodeToRack(bookieAddress.getHostName(), "/rack" + (i)); + bookieAddresses.add(bookieAddress.toBookieId()); + regManager.registerBookie(bookieAddress.toBookieId(), false, BookieServiceInfo.EMPTY); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 4; + Collections.shuffle(bookieAddresses); + + // closed ledger + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + ensembleSize = 4; + // closed ledger with multiple segments + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 5)) + .newEnsembleEntry(60L, bookieAddresses.subList(0, 4)) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + // non-closed ledger + initMeta = LedgerMetadataBuilder.create() + .withId(3L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + 
.withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(3L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + // non-closed ledger with multiple segments + initMeta = LedgerMetadataBuilder.create() + .withId(4L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 5)) + .newEnsembleEntry(60L, bookieAddresses.subList(0, 4)) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(4L, initMeta).get(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + /* + * since all of the bookies are in different racks, there shouldn't be any ledger not adhering + * to placement policy. + */ + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", 0, + ledgersNotAdheringToPlacementPolicyGuage.getSample()); + /* + * since all of the bookies are in different racks, there shouldn't be any ledger softly adhering + * to placement policy. + */ + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", 0, + ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicy() throws Exception { + int numOfBookies = 5; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
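+ * For example, one of the rolling write quorums over this ensemble is
+ * {98.98.98.3, 98.98.98.4, 98.98.98.0}, which spans only /rack1 and /rack2,
+ * i.e. fewer than minNumRacksPerWriteQuorum (3).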
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + /* + * this is non-closed ledger, so it shouldn't count as ledger not + * adhering to placement policy + */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicyAndNotMarkToUnderreplication() + throws Exception { + int numOfBookies = 5; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
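+ * Since this test does not enable repairedPlacementPolicyNotAdheringBookieEnable,
+ * the check is only expected to report the violation through the gauge; the
+ * ledger must not be marked underreplicated, which is why
+ * pollLedgerToRereplicate() is asserted to return -1 at the end of the test.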
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + long unnderReplicateLedgerId = underreplicationManager.pollLedgerToRereplicate(); + assertEquals(unnderReplicateLedgerId, -1); + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicyAndMarkToUnderreplication() + throws Exception { + int numOfBookies = 5; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
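+ * This test additionally enables repairedPlacementPolicyNotAdheringBookieEnable,
+ * so besides bumping the gauge the check is expected to mark this ledger as
+ * underreplicated; pollLedgerToRereplicate() at the end of the test is asserted
+ * to return this ledger's id (1L).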
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + servConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + long unnderReplicateLedgerId = underreplicationManager.pollLedgerToRereplicate(); + assertEquals(unnderReplicateLedgerId, 1L); + } + + @Test + public void testPlacementPolicyCheckForURLedgersElapsedRecoveryGracePeriod() throws Exception { + testPlacementPolicyCheckWithURLedgers(true); + } + + @Test + public void testPlacementPolicyCheckForURLedgersNotElapsedRecoveryGracePeriod() throws Exception { + testPlacementPolicyCheckWithURLedgers(false); + } + + public void testPlacementPolicyCheckWithURLedgers(boolean timeElapsed) throws Exception { + int numOfBookies = 4; + /* + * in timeElapsed=true scenario, set some low value, otherwise set some + * highValue. + */ + int underreplicatedLedgerRecoveryGracePeriod = timeElapsed ? 1 : 1000; + int numOfURLedgersElapsedRecoveryGracePeriod = 0; + List bookieAddresses = new ArrayList(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." 
+ i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + + long ledgerId1 = 1L; + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId1) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId1, initMeta).get(); + underreplicationManager.markLedgerUnderreplicated(ledgerId1, bookieAddresses.get(0).toString()); + if (timeElapsed) { + numOfURLedgersElapsedRecoveryGracePeriod++; + } + + /* + * this is non-closed ledger, it should also be reported as + * URLedgersElapsedRecoveryGracePeriod + */ + ensembleSize = 3; + long ledgerId2 = 21234561L; + initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId2) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2))) + .newEnsembleEntry(100L, + Arrays.asList(bookieAddresses.get(3), bookieAddresses.get(1), bookieAddresses.get(2))) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId2, initMeta).get(); + underreplicationManager.markLedgerUnderreplicated(ledgerId2, bookieAddresses.get(0).toString()); + if (timeElapsed) { + numOfURLedgersElapsedRecoveryGracePeriod++; + } + + /* + * this ledger is not marked underreplicated. + */ + long ledgerId3 = 31234561L; + initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId3) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, + Arrays.asList(bookieAddresses.get(1), bookieAddresses.get(2), bookieAddresses.get(3))) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId3, initMeta).get(); + + if (timeElapsed) { + /* + * in timeelapsed scenario, by waiting for + * underreplicatedLedgerRecoveryGracePeriod, recovery time must be + * elapsed. + */ + Thread.sleep((underreplicatedLedgerRecoveryGracePeriod + 1) * 1000); + } else { + /* + * in timeElapsed=false scenario, since + * underreplicatedLedgerRecoveryGracePeriod is set to some high + * value, there is no value in waiting. So just wait for some time + * and make sure urledgers are not reported as recoverytime elapsed + * urledgers. 
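+ * (Roughly speaking, a marked ledger is only counted here once the time since
+ * it was marked underreplicated exceeds underreplicatedLedgerRecoveryGracePeriod,
+ * which is configured in seconds.)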
+ */ + Thread.sleep(5000); + } + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setUnderreplicatedLedgerRecoveryGracePeriod(underreplicatedLedgerRecoveryGracePeriod); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge underreplicatedLedgersElapsedRecoveryGracePeriodGuage = statsLogger + .getGauge(ReplicationStats.NUM_UNDERREPLICATED_LEDGERS_ELAPSED_RECOVERY_GRACE_PERIOD); + assertEquals("NUM_UNDERREPLICATED_LEDGERS_ELAPSED_RECOVERY_GRACE_PERIOD guage value", + numOfURLedgersElapsedRecoveryGracePeriod, + underreplicatedLedgersElapsedRecoveryGracePeriodGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPolicyWithMultipleSegments() throws Exception { + int numOfBookies = 7; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack4"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.5", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.6", "/rack3"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 5; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 4; + + /* + * this closed ledger in each writeQuorumSize (5), there would be + * atleast minNumRacksPerWriteQuorumConfValue (4) racks. So it wont be + * counted as ledgers not adhering to placement policy. + */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 5)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 6)) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + /* + * for the second segment bookies are from /rack1, /rack2 and /rack3, + * which is < minNumRacksPerWriteQuorumConfValue (4). So it is not + * adhering to placement policy. + * + * also for the third segment are from /rack1, /rack2 and /rack3, which + * is < minNumRacksPerWriteQuorumConfValue (4). So it is not adhering to + * placement policy. + * + * Though there are multiple segments are not adhering to placement + * policy, it should be counted as single ledger. 
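+ * Concretely: the segment starting at entry 20 uses bookies .0, .1, .2, .4 and .5
+ * (/rack1, /rack2, /rack3, /rack1, /rack2) and the segment starting at entry 40
+ * uses bookies .0, .1, .2, .4 and .6 (/rack1, /rack2, /rack3, /rack1, /rack3),
+ * i.e. each of those segments covers only 3 distinct racks.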
+ */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 5)) + .newEnsembleEntry(20L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2), + bookieAddresses.get(4), bookieAddresses.get(5))) + .newEnsembleEntry(40L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2), + bookieAddresses.get(4), bookieAddresses.get(6))) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY gauge value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY gauge value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testZoneawarePlacementPolicyCheck() throws Exception { + int numOfBookies = 6; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + int numOfLedgersSoftlyAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList(); + RegistrationManager regManager = driver.createRegistrationManager(); + /* + * 6 bookies - 3 zones and 2 uds + */ + for (int i = 0; i < numOfBookies; i++) { + BookieSocketAddress bookieAddress = new BookieSocketAddress("98.98.98." 
+ i, 2181); + bookieAddresses.add(bookieAddress.toBookieId()); + regManager.registerBookie(bookieAddress.toBookieId(), false, BookieServiceInfo.EMPTY); + String zone = "/zone" + (i % 3); + String upgradeDomain = "/ud" + (i % 2); + String networkLocation = zone + upgradeDomain; + StaticDNSResolver.addNodeToRack(bookieAddress.getHostName(), networkLocation); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setDesiredNumZonesPerWriteQuorum(3); + servConf.setMinNumZonesPerWriteQuorum(2); + setServerConfigPropertiesForZonePlacement(servConf); + + /* + * this closed ledger adheres to ZoneAwarePlacementPolicy, since + * ensemble is spread across 3 zones and 2 UDs + */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(6) + .withWriteQuorumSize(6) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + /* + * this is non-closed ledger, so though ensemble is not adhering to + * placement policy (since ensemble is not multiple of writeQuorum), + * this shouldn't be reported + */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(6) + .withWriteQuorumSize(5) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + /* + * this is closed ledger, since ensemble is not multiple of writeQuorum, + * this ledger is not adhering to placement policy. + */ + initMeta = LedgerMetadataBuilder.create() + .withId(3L) + .withEnsembleSize(6) + .withWriteQuorumSize(5) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(3L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + /* + * this closed ledger adheres softly to ZoneAwarePlacementPolicy, since + * ensemble/writeQuorum of size 4 has spread across just + * minNumZonesPerWriteQuorum (2). 
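+ * The chosen bookies are 98.98.98.0 (/zone0/ud0), 98.98.98.1 (/zone1/ud1),
+ * 98.98.98.3 (/zone0/ud1) and 98.98.98.4 (/zone1/ud0): 2 distinct zones, which
+ * meets minNumZonesPerWriteQuorum (2) but falls short of
+ * desiredNumZonesPerWriteQuorum (3), hence "softly adhering".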
+ */ + List newEnsemble = new ArrayList(); + newEnsemble.add(bookieAddresses.get(0)); + newEnsemble.add(bookieAddresses.get(1)); + newEnsemble.add(bookieAddresses.get(3)); + newEnsemble.add(bookieAddresses.get(4)); + initMeta = LedgerMetadataBuilder.create() + .withId(4L) + .withEnsembleSize(4) + .withWriteQuorumSize(4) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, newEnsemble) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(4L, initMeta).get(); + numOfLedgersSoftlyAdheringToPlacementPolicy++; + + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersSoftlyAdheringToPlacementPolicy, + ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + private void setServerConfigPropertiesForRackPlacement(ServerConfiguration servConf) { + setServerConfigProperties(servConf, RackawareEnsemblePlacementPolicy.class.getName()); + } + + private void setServerConfigPropertiesForZonePlacement(ServerConfiguration servConf) { + setServerConfigProperties(servConf, ZoneawareEnsemblePlacementPolicy.class.getName()); + } + + private void setServerConfigProperties(ServerConfiguration servConf, String ensemblePlacementPolicyClass) { + servConf.setProperty(REPP_DNS_RESOLVER_CLASS, StaticDNSResolver.class.getName()); + servConf.setProperty(ClientConfiguration.ENSEMBLE_PLACEMENT_POLICY, ensemblePlacementPolicyClass); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + servConf.setAuditorPeriodicReplicasCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1000); + } + + private TestStatsLogger startAuditorAndWaitForPlacementPolicyCheck(ServerConfiguration servConf, + MutableObject auditorRef) throws MetadataException, CompatibilityException, KeeperException, + InterruptedException, UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger placementPolicyCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, + statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, placementPolicyCheckStatsLogger.getSuccessCount()); + urm.setPlacementPolicyCheckCTime(-1); + auditor.start(); + /* + * since 
placementPolicyCheckCTime is set to -1, placementPolicyCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("placementPolicyCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, placementPolicyCheckStatsLogger.getSuccessCount()); + return statsLogger; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java new file mode 100644 index 0000000000000..62162bd25f427 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorReplicasCheckTask}. 
+ */ +public class AuditorReplicasCheckTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorReplicasCheckTaskTest.class); + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorReplicasCheckTaskTest() throws Exception { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseClientConf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + LedgerManagerFactory ledgerManagerFactory = bookKeeper.getLedgerManagerFactory(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + if (admin != null) { + admin.close(); + } + super.tearDown(); + } + + @Test + public void testReplicasCheck() throws BKException, InterruptedException { + + // 1. create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + // 2. init auditorReplicasCheckTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + AuditorReplicasCheckTask auditorReplicasCheckTask = new AuditorReplicasCheckTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. replicasCheck + auditorReplicasCheckTask.runTask(); + + // 4. verify + assertEquals("REPLICAS_CHECK_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) + statsLogger.getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME)).getSuccessCount()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java new file mode 100644 index 0000000000000..2e9dbc158597d --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java @@ -0,0 +1,936 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.api.DigestType; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.BookieServiceInfo; +import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.replication.AuditorPeriodicCheckTest.TestAuditor; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.AvailabilityOfEntriesOfLedger; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.commons.collections4.map.MultiKeyMap; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.zookeeper.KeeperException; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests the logic of Auditor's ReplicasCheck. 
+ */ +public class AuditorReplicasCheckTest extends BookKeeperClusterTestCase { + private MetadataBookieDriver driver; + private RegistrationManager regManager; + + public AuditorReplicasCheckTest() throws Exception { + super(1); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + StaticDNSResolver.reset(); + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + regManager = driver.createRegistrationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != regManager) { + regManager.close(); + } + if (null != driver) { + driver.close(); + } + super.tearDown(); + } + + private class TestBookKeeperAdmin extends BookKeeperAdmin { + + private final MultiKeyMap returnAvailabilityOfEntriesOfLedger; + private final MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger; + + public TestBookKeeperAdmin(BookKeeper bkc, StatsLogger statsLogger, + MultiKeyMap returnAvailabilityOfEntriesOfLedger, + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger) { + super(bkc, statsLogger, baseClientConf); + this.returnAvailabilityOfEntriesOfLedger = returnAvailabilityOfEntriesOfLedger; + this.errorReturnValueForGetAvailabilityOfEntriesOfLedger = + errorReturnValueForGetAvailabilityOfEntriesOfLedger; + } + + @Override + public CompletableFuture asyncGetListOfEntriesOfLedger( + BookieId address, long ledgerId) { + CompletableFuture futureResult = + new CompletableFuture(); + Integer errorReturnValue = errorReturnValueForGetAvailabilityOfEntriesOfLedger.get(address.toString(), + Long.toString(ledgerId)); + if (errorReturnValue != null) { + futureResult.completeExceptionally(BKException.create(errorReturnValue).fillInStackTrace()); + } else { + AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = returnAvailabilityOfEntriesOfLedger + .get(address.toString(), Long.toString(ledgerId)); + futureResult.complete(availabilityOfEntriesOfLedger); + } + return futureResult; + } + } + + private TestStatsLogger startAuditorAndWaitForReplicasCheck(ServerConfiguration servConf, + MutableObject auditorRef, + MultiKeyMap expectedReturnAvailabilityOfEntriesOfLedger, + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger) + throws MetadataException, CompatibilityException, KeeperException, InterruptedException, + UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger replicasCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + + final TestAuditor auditor = new 
TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, true, + new TestBookKeeperAdmin(bkc, statsLogger, expectedReturnAvailabilityOfEntriesOfLedger, + errorReturnValueForGetAvailabilityOfEntriesOfLedger), + true, statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 0, replicasCheckStatsLogger.getSuccessCount()); + urm.setReplicasCheckCTime(-1); + auditor.start(); + /* + * since replicasCheckCTime is set to -1, replicasCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("replicasCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 200; i++) { + Thread.sleep(100); + if (replicasCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 1, replicasCheckStatsLogger.getSuccessCount()); + return statsLogger; + } + + private void setServerConfigProperties(ServerConfiguration servConf) { + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicReplicasCheckInterval(1000); + } + + List addAndRegisterBookies(int numOfBookies) + throws BookieException { + BookieId bookieAddress; + List bookieAddresses = new ArrayList(); + for (int i = 0; i < numOfBookies; i++) { + bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + return bookieAddresses; + } + + private void createClosedLedgerMetadata(LedgerManager lm, long ledgerId, int ensembleSize, int writeQuorumSize, + int ackQuorumSize, Map> segmentEnsembles, long lastEntryId, int length, + DigestType digestType, byte[] password) throws InterruptedException, ExecutionException { + LedgerMetadataBuilder ledgerMetadataBuilder = LedgerMetadataBuilder.create(); + ledgerMetadataBuilder.withId(ledgerId).withEnsembleSize(ensembleSize).withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize).withClosedState().withLastEntryId(lastEntryId).withLength(length) + .withDigestType(digestType).withPassword(password); + for (Map.Entry> mapEntry : segmentEnsembles.entrySet()) { + ledgerMetadataBuilder.newEnsembleEntry(mapEntry.getKey(), mapEntry.getValue()); + } + LedgerMetadata initMeta = ledgerMetadataBuilder.build(); + lm.createLedgerMetadata(ledgerId, initMeta).get(); + } + + private void createNonClosedLedgerMetadata(LedgerManager lm, long ledgerId, int ensembleSize, int writeQuorumSize, + int ackQuorumSize, Map> segmentEnsembles, DigestType digestType, + byte[] password) throws InterruptedException, ExecutionException { + LedgerMetadataBuilder ledgerMetadataBuilder = LedgerMetadataBuilder.create(); + ledgerMetadataBuilder.withId(ledgerId).withEnsembleSize(ensembleSize).withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize).withDigestType(digestType).withPassword(password); + for (Map.Entry> mapEntry : segmentEnsembles.entrySet()) { + ledgerMetadataBuilder.newEnsembleEntry(mapEntry.getKey(), mapEntry.getValue()); + } + LedgerMetadata initMeta = ledgerMetadataBuilder.build(); + lm.createLedgerMetadata(ledgerId, initMeta).get(); + } + + private void runTestScenario(MultiKeyMap returnAvailabilityOfEntriesOfLedger, + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger, + int expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + int 
expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + int expectedNumLedgersHavingLessThanWQReplicasOfAnEntry) throws Exception { + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + setServerConfigProperties(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForReplicasCheck(servConf, auditorRef, + returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger); + checkReplicasCheckStats(statsLogger, expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + expectedNumLedgersHavingLessThanWQReplicasOfAnEntry); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + } + } + + private void checkReplicasCheckStats(TestStatsLogger statsLogger, + int expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + int expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + int expectedNumLedgersHavingLessThanWQReplicasOfAnEntry) { + Gauge numLedgersFoundHavingNoReplicaOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_NO_REPLICA_OF_AN_ENTRY); + Gauge numLedgersHavingLessThanAQReplicasOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_LESS_THAN_AQ_REPLICAS_OF_AN_ENTRY); + Gauge numLedgersHavingLessThanWQReplicasOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_LESS_THAN_WQ_REPLICAS_OF_AN_ENTRY); + + assertEquals("NUM_LEDGERS_HAVING_NO_REPLICA_OF_AN_ENTRY guage value", + expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + numLedgersFoundHavingNoReplicaOfAnEntryGuage.getSample()); + assertEquals("NUM_LEDGERS_HAVING_LESS_THAN_AQ_REPLICAS_OF_AN_ENTRY guage value", + expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + numLedgersHavingLessThanAQReplicasOfAnEntryGuage.getSample()); + assertEquals("NUM_LEDGERS_HAVING_LESS_THAN_WQ_REPLICAS_OF_AN_ENTRY guage value", + expectedNumLedgersHavingLessThanWQReplicasOfAnEntry, + numLedgersHavingLessThanWQReplicasOfAnEntryGuage.getSample()); + } + + /* + * For all the ledgers and for all the bookies, + * asyncGetListOfEntriesOfLedger would return + * BookieHandleNotAvailableException, so these ledgers wouldn't be counted + * against expectedNumLedgersFoundHavingNoReplicaOfAnEntry / + * LessThanAQReplicasOfAnEntry / LessThanWQReplicasOfAnEntry. + */ + @Test + public void testReplicasCheckForBookieHandleNotAvailable() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + long lastEntryId = 100; + int length = 10000; + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + /* + * closed ledger + * + * for this ledger, for all the bookies we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * BookieHandleNotAvailableException so asyncGetListOfEntriesOfLedger will + * return BookieHandleNotAvailableException. 
+ */ + Map> segmentEnsembles = new LinkedHashMap>(); + segmentEnsembles.put(0L, bookieAddresses); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + ensembleSize = 4; + /* + * closed ledger with multiple segments + * + * for this ledger, for all the bookies we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * BookieHandleNotAvailableException so asyncGetListOfEntriesOfLedger will + * return BookieHandleNotAvailableException. + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + /* + * non-closed ledger + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + /* + * non-closed ledger with multiple segments + * + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, 0, + 0); + } + + /* + * In this testscenario all the ledgers have a missing entry. So all closed + * ledgers should be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry. 
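+ * (a ledger is counted here when at least one of its entries has no
+ * remaining copy on any bookie of its ensemble)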
+ */ + @Test + public void testReplicasCheckForLedgersFoundHavingNoReplica() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + long lastEntryId = 100; + int length = 10000; + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingNoReplicaOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger we are setting returnAvailabilityOfEntriesOfLedger to + * Empty one for all of the bookies, so this ledger would be counted in + * ledgersFoundHavingNoReplicaOfAnEntry . + */ + Map> segmentEnsembles = new LinkedHashMap>(); + segmentEnsembles.put(0L, bookieAddresses); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + ensembleSize = 4; + /* + * closed ledger with multiple segments + * + * for this ledger we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * NoSuchLedgerExistsException. This is equivalent to + * EMPTY_AVAILABILITYOFENTRIESOFLEDGER. 
So this ledger would be counted + * in ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.NoSuchLedgerExistsException); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * non-closed ledger + * + * since this is non-closed ledger, it should not be counted in + * ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + } + + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + /* + * closed ledger + * + * for this ledger we are setting returnAvailabilityOfEntriesOfLedger to + * just {0l} for all of the bookies and entry 1l is missing for all of + * the bookies, so this ledger would be counted in + * ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 4L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0L })); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * For this closed ledger, entry 1 is missing. So it should be counted + * towards numLedgersFoundHavingNoReplicaOfAnEntry. 
+ */ + ensembleSize = 4; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 3; + length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + numLedgersFoundHavingNoReplicaOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, + numLedgersFoundHavingNoReplicaOfAnEntry, 0, 0); + } + + /* + * In this testscenario all the ledgers have an entry with less than AQ + * number of copies. So all closed ledgers should be counted towards + * numLedgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + @Test + public void testReplicasCheckForLedgersFoundHavingLessThanAQReplicasOfAnEntry() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingLessThanAQReplicasOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger there is only one copy of entry 2, so this ledger + * would be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = 3; + int length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2, 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments. + * + * for this ledger there is only one copy of entry 2, so this ledger + * would be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry. 
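+ * (only one bookie reports a copy of entry 2, which is below the
+ * ackQuorumSize of 2)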
+ * + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments + * + * for this ledger entry 2 is overrreplicated, but it has only one copy + * in the set of bookies it is supposed to be. So it should be counted + * towards ledgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 3L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * non-closed ledger + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 
})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + + /* + * this is closed ledger. + * + * For third bookie, asyncGetListOfEntriesOfLedger will fail with + * BookieHandleNotAvailableException, so this should not be counted + * against missing copies of an entry. Other than that, for both entries + * 0 and 1, two copies are missing. Hence this should be counted towards + * numLedgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, + numLedgersFoundHavingLessThanAQReplicasOfAnEntry, 0); + } + + /* + * In this testscenario all the ledgers have an entry with less than WQ + * number of copies but greater than AQ. So all closed ledgers should be + * counted towards numLedgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + @Test + public void testReplicasCheckForLedgersFoundHavingLessThanWQReplicasOfAnEntry() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingLessThanWQReplicasOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger a copy of entry 3, so this ledger would be counted + * towards ledgersFoundHavingLessThanWQReplicasOfAnEntry. 
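+ * (a copy of entry 3 is missing: only two bookies report it, which is below
+ * the writeQuorumSize of 3 but not below the ackQuorumSize of 2)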
+ */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = 3; + int length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2, 3 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments + * + * for this ledger a copy of entry 0 and entry 2 are missing, so this + * ledger would be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * non-closed ledger with multiple segments + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), + Long.toString(ledgerId), BKException.Code.NoSuchLedgerExistsException); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + 
returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + + /* + * closed ledger. + * + * for this ledger entry 0 is overrreplicated, but a copy is missing in + * the set of bookies it is supposed to be. So it should be counted + * towards ledgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + ensembleSize = 4; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 4L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * this is closed ledger. + * + * For third bookie, asyncGetListOfEntriesOfLedger will fail with + * BookieHandleNotAvailableException, so this should not be counted + * against missing copies of an entry. Other than that, for both entries + * 0 and 1, a copy is missing. Hence this should be counted towards + * numLedgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, 0, + numLedgersFoundHavingLessThanWQReplicasOfAnEntry); + } + + /* + * In this testscenario all the ledgers have empty segments. 
+ */ + @Test + public void testReplicasCheckForLedgersWithEmptySegments() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingNoReplicaOfAnEntry = 0; + int numLedgersFoundHavingLessThanAQReplicasOfAnEntry = 0; + int numLedgersFoundHavingLessThanWQReplicasOfAnEntry = 0; + + /* + * closed ledger. + * + * This closed Ledger has no entry. So it should not be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry/LessThanAQReplicasOfAnEntry + * /WQReplicasOfAnEntry. + */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = -1L; + int length = 0; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + + /* + * closed ledger with multiple segments. + * + * This ledger has empty last segment, but all the entries have + * writeQuorumSize number of copies, So it should not be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry/LessThanAQReplicasOfAnEntry/ + * WQReplicasOfAnEntry. + */ + lastEntryId = 2; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + + /* + * Closed ledger with multiple segments. + * + * Segment0, Segment1, Segment3, Segment5 and Segment6 are empty. + * Entries from entryid 3 are missing. So it should be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry. 
+ */ + lastEntryId = 5; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(4L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(4L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * non-closed ledger with multiple segments + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry + */ + lastEntryId = 2; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(0L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, + numLedgersFoundHavingNoReplicaOfAnEntry, numLedgersFoundHavingLessThanAQReplicasOfAnEntry, + numLedgersFoundHavingLessThanWQReplicasOfAnEntry); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java new file mode 100644 index 0000000000000..3e5081ed0ef9d --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithLedgerManagerFactory; +import static org.testng.AssertJUnit.assertEquals; +import com.google.common.util.concurrent.UncheckedExecutionException; +import lombok.Cleanup; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerAuditorManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.test.TestCallbacks; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test auditor behaviours during a rolling restart. + */ +public class AuditorRollingRestartTest extends BookKeeperClusterTestCase { + + public AuditorRollingRestartTest() throws Exception { + super(3, 600); + // run the daemon within the bookie + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + @Override + protected void startBKCluster(String metadataServiceUri) throws Exception { + super.startBKCluster(metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + } + + /** + * Test no auditing during restart if disabled. 
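+ * With ledger replication disabled, a rolling restart of the auditor bookie
+ * should leave nothing marked as under-replicated.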
+ */ + @Test + public void testAuditingDuringRollingRestart() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + runFunctionWithLedgerManagerFactory( + confByIndex(0), + mFactory -> { + try { + testAuditingDuringRollingRestart(mFactory); + } catch (Exception e) { + throw new UncheckedExecutionException(e.getMessage(), e); + } + return null; + } + ); + } + + private void testAuditingDuringRollingRestart(LedgerManagerFactory mFactory) throws Exception { + final LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int i = 0; i < 10; i++) { + lh.asyncAddEntry("foobar".getBytes(), new TestCallbacks.AddCallbackFuture(i), null); + } + lh.addEntry("foobar".getBytes()); + lh.close(); + + assertEquals("shouldn't be anything under replicated", + underReplicationManager.pollLedgerToRereplicate(), -1); + underReplicationManager.disableLedgerReplication(); + + @Cleanup + LedgerAuditorManager lam = mFactory.newLedgerAuditorManager(); + BookieId auditor = lam.getCurrentAuditor(); + ServerConfiguration conf = killBookie(auditor); + Thread.sleep(2000); + startBookie(conf); + Thread.sleep(2000); // give it time to run + assertEquals("shouldn't be anything under replicated", -1, + underReplicationManager.pollLedgerToRereplicate()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java new file mode 100644 index 0000000000000..41e159b77714f --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import org.apache.bookkeeper.auth.AuthCallbacks; +import org.apache.bookkeeper.auth.AuthToken; +import org.apache.bookkeeper.auth.ClientAuthProvider; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.proto.ClientConnectionPeer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies the auditor bookie scenarios from the auth point-of-view. 
+ */ +public class AuthAutoRecoveryTest extends BookKeeperClusterTestCase { + + private static final Logger LOG = LoggerFactory + .getLogger(AuthAutoRecoveryTest.class); + + public static final String TEST_AUTH_PROVIDER_PLUGIN_NAME = "TestAuthProviderPlugin"; + + private static String clientSideRole; + + private static class AuditorClientAuthInterceptorFactory + implements ClientAuthProvider.Factory { + + @Override + public String getPluginName() { + return TEST_AUTH_PROVIDER_PLUGIN_NAME; + } + + @Override + public void init(ClientConfiguration conf) { + clientSideRole = conf.getClientRole(); + } + + @Override + public ClientAuthProvider newProvider(ClientConnectionPeer addr, + final AuthCallbacks.GenericCallback completeCb) { + return new ClientAuthProvider() { + public void init(AuthCallbacks.GenericCallback cb) { + completeCb.operationComplete(BKException.Code.OK, null); + } + + public void process(AuthToken m, AuthCallbacks.GenericCallback cb) { + } + }; + } + } + + protected ServerConfiguration newServerConfiguration() throws Exception { + ServerConfiguration conf = super.newServerConfiguration(); + conf.setClientAuthProviderFactoryClass(AuditorClientAuthInterceptorFactory.class.getName()); + return conf; + } + + public AuthAutoRecoveryTest() { + super(6); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + /* + * test the client role of the auditor + */ + @Test + public void testAuthClientRole() throws Exception { + ServerConfiguration config = confByIndex(0); + assertEquals(AuditorClientAuthInterceptorFactory.class.getName(), config.getClientAuthProviderFactoryClass()); + AutoRecoveryMain main = new AutoRecoveryMain(config); + try { + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + } finally { + main.shutdown(); + } + assertEquals(ClientConfiguration.CLIENT_ROLE_SYSTEM, clientSideRole); + } + +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java new file mode 100644 index 0000000000000..1d741c551ddb9 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertTrue; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.util.TestUtils; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test the AuditorPeer. + */ +public class AutoRecoveryMainTest extends BookKeeperClusterTestCase { + + public AutoRecoveryMainTest() throws Exception { + super(3); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * Test the startup of the auditorElector and RW. + */ + @Test + public void testStartup() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + AutoRecoveryMain main = new AutoRecoveryMain(confByIndex(0)); + try { + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + } finally { + main.shutdown(); + } + } + + /* + * Test the shutdown of all daemons + */ + @Test + public void testShutdown() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + AutoRecoveryMain main = new AutoRecoveryMain(confByIndex(0)); + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + + main.shutdown(); + assertFalse("AuditorElector should not be running", + main.auditorElector.isRunning()); + assertFalse("Replication worker should not be running", + main.replicationWorker.isRunning()); + } + + /** + * Test that, if an autorecovery looses its ZK connection/session it will + * shutdown. + */ + @Test + public void testAutoRecoverySessionLoss() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + confByIndex(1).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + confByIndex(2).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + /* + * initialize three AutoRecovery instances. 
+ */ + AutoRecoveryMain main1 = new AutoRecoveryMain(confByIndex(0)); + AutoRecoveryMain main2 = new AutoRecoveryMain(confByIndex(1)); + AutoRecoveryMain main3 = new AutoRecoveryMain(confByIndex(2)); + + /* + * start main1, make sure all the components are started and main1 is + * the current Auditor + */ + PulsarMetadataClientDriver pulsarMetadataClientDriver1 = startAutoRecoveryMain(main1); + ZooKeeper zk1 = getZk(pulsarMetadataClientDriver1); + + // Wait until auditor gets elected + for (int i = 0; i < 10; i++) { + try { + if (main1.auditorElector.getCurrentAuditor() != null) { + break; + } else { + Thread.sleep(1000); + } + } catch (IOException e) { + Thread.sleep(1000); + } + } + BookieId currentAuditor = main1.auditorElector.getCurrentAuditor(); + assertNotNull(currentAuditor); + Auditor auditor1 = main1.auditorElector.getAuditor(); + assertEquals("Current Auditor should be AR1", currentAuditor, BookieImpl.getBookieId(confByIndex(0))); + Awaitility.waitAtMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + assertNotNull(auditor1); + assertTrue("Auditor of AR1 should be running", auditor1.isRunning()); + }); + + + /* + * start main2 and main3 + */ + PulsarMetadataClientDriver pulsarMetadataClientDriver2 = startAutoRecoveryMain(main2); + ZooKeeper zk2 = getZk(pulsarMetadataClientDriver2); + + PulsarMetadataClientDriver pulsarMetadataClientDriver3 = startAutoRecoveryMain(main3); + ZooKeeper zk3 = getZk(pulsarMetadataClientDriver3); + + + /* + * make sure AR1 is still the current Auditor and AR2's and AR3's + * auditors are not running. + */ + assertEquals("Current Auditor should still be AR1", currentAuditor, BookieImpl.getBookieId(confByIndex(0))); + Awaitility.await().untilAsserted(() -> { + assertTrue("AR2's Auditor should not be running", (main2.auditorElector.getAuditor() == null + || !main2.auditorElector.getAuditor().isRunning())); + assertTrue("AR3's Auditor should not be running", (main3.auditorElector.getAuditor() == null + || !main3.auditorElector.getAuditor().isRunning())); + }); + + + /* + * expire zk2 and zk1 sessions. + */ + zkUtil.expireSession(zk2); + zkUtil.expireSession(zk1); + + /* + * wait for some time for all the components of AR1 and AR2 are + * shutdown. + */ + for (int i = 0; i < 10; i++) { + if (!main1.auditorElector.isRunning() && !main1.replicationWorker.isRunning() + && !main1.isAutoRecoveryRunning() && !main2.auditorElector.isRunning() + && !main2.replicationWorker.isRunning() && !main2.isAutoRecoveryRunning()) { + break; + } + Thread.sleep(1000); + } + + /* + * the AR3 should be current auditor. + */ + currentAuditor = main3.auditorElector.getCurrentAuditor(); + assertEquals("Current Auditor should be AR3", currentAuditor, BookieImpl.getBookieId(confByIndex(2))); + Awaitility.await().untilAsserted(() -> { + assertNotNull(main3.auditorElector.getAuditor()); + assertTrue("Auditor of AR3 should be running", main3.auditorElector.getAuditor().isRunning()); + }); + + Awaitility.waitAtMost(100, TimeUnit.SECONDS).untilAsserted(() -> { + /* + * since AR3 is current auditor, AR1's auditor should not be running + * anymore. + */ + assertFalse("AR1's auditor should not be running", auditor1.isRunning()); + + /* + * components of AR2 and AR3 should not be running since zk1 and zk2 + * sessions are expired. 
+ */ + assertFalse("Elector1 should have shutdown", main1.auditorElector.isRunning()); + assertFalse("RW1 should have shutdown", main1.replicationWorker.isRunning()); + assertFalse("AR1 should have shutdown", main1.isAutoRecoveryRunning()); + assertFalse("Elector2 should have shutdown", main2.auditorElector.isRunning()); + assertFalse("RW2 should have shutdown", main2.replicationWorker.isRunning()); + assertFalse("AR2 should have shutdown", main2.isAutoRecoveryRunning()); + }); + + } + + /* + * start autoRecoveryMain and make sure all its components are running and + * myVote node is existing + */ + PulsarMetadataClientDriver startAutoRecoveryMain(AutoRecoveryMain autoRecoveryMain) throws Exception { + autoRecoveryMain.start(); + PulsarMetadataClientDriver pulsarMetadataClientDriver = (PulsarMetadataClientDriver) autoRecoveryMain.bkc + .getMetadataClientDriver(); + TestUtils.assertEventuallyTrue("autoRecoveryMain components should be running", + () -> autoRecoveryMain.auditorElector.isRunning() + && autoRecoveryMain.replicationWorker.isRunning() && autoRecoveryMain.isAutoRecoveryRunning()); + return pulsarMetadataClientDriver; + } + + private ZooKeeper getZk(PulsarMetadataClientDriver pulsarMetadataClientDriver) throws Exception { + PulsarLedgerManagerFactory pulsarLedgerManagerFactory = + (PulsarLedgerManagerFactory) pulsarMetadataClientDriver.getLedgerManagerFactory(); + Field field = pulsarLedgerManagerFactory.getClass().getDeclaredField("store"); + field.setAccessible(true); + ZKMetadataStore zkMetadataStore = (ZKMetadataStore) field.get(pulsarLedgerManagerFactory); + return zkMetadataStore.getZkClient(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java new file mode 100644 index 0000000000000..c681a1f0764ee --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java @@ -0,0 +1,851 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +/** + * This file is derived from BookKeeperClusterTestCase from Apache BookKeeper + * http://bookkeeper.apache.org + */ + +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; +import static org.apache.pulsar.common.util.PortManager.nextLockedFreePort; +import static org.testng.Assert.assertFalse; +import com.google.common.base.Stopwatch; +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.bookkeeper.bookie.Bookie; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.TestStatsProvider; +import org.apache.bookkeeper.common.allocator.PoolingPolicy; +import org.apache.bookkeeper.conf.AbstractConfiguration; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.conf.TestBKConfiguration; +import org.apache.bookkeeper.metastore.InMemoryMetaStore; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.test.ServerTester; +import org.apache.bookkeeper.test.TmpDirs; +import org.apache.bookkeeper.test.ZooKeeperCluster; +import org.apache.bookkeeper.test.ZooKeeperClusterUtil; +import org.apache.pulsar.common.util.PortManager; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterTest; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.BeforeTest; + +/** + * A class runs several bookie servers for testing. + */ +public abstract class BookKeeperClusterTestCase { + + static final Logger LOG = LoggerFactory.getLogger(BookKeeperClusterTestCase.class); + + protected String testName; + + @BeforeMethod + public void handleTestMethodName(Method method) { + testName = method.getName(); + } + + // Metadata service related variables + protected final ZooKeeperCluster zkUtil; + protected ZooKeeper zkc; + protected String metadataServiceUri; + protected FaultInjectionMetadataStore metadataStore; + + // BookKeeper related variables + protected final TmpDirs tmpDirs = new TmpDirs(); + protected final List servers = new LinkedList<>(); + + protected int numBookies; + protected BookKeeperTestClient bkc; + protected boolean useUUIDasBookieId = true; + + /* + * Loopback interface is set as the listening interface and allowloopback is + * set to true in this server config. So bookies in this test process would + * bind to loopback address. 
+ */ + protected final ServerConfiguration baseConf = TestBKConfiguration.newServerConfiguration(); + protected final ClientConfiguration baseClientConf = TestBKConfiguration.newClientConfiguration(); + + private boolean isAutoRecoveryEnabled; + protected ExecutorService executor; + private final List bookiePorts = new ArrayList<>(); + + SynchronousQueue asyncExceptions = new SynchronousQueue<>(); + protected void captureThrowable(Runnable c) { + try { + c.run(); + } catch (Throwable e) { + LOG.error("Captured error: ", e); + asyncExceptions.add(e); + } + } + + public BookKeeperClusterTestCase(int numBookies) { + this(numBookies, 120); + } + + public BookKeeperClusterTestCase(int numBookies, int testTimeoutSecs) { + this(numBookies, 1, testTimeoutSecs); + } + + public BookKeeperClusterTestCase(int numBookies, int numOfZKNodes, int testTimeoutSecs) { + this.numBookies = numBookies; + if (numOfZKNodes == 1) { + zkUtil = new ZooKeeperUtil(getLedgersRootPath()); + } else { + try { + zkUtil = new ZooKeeperClusterUtil(numOfZKNodes); + } catch (IOException | KeeperException | InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @BeforeTest + public void setUp() throws Exception { + setUp(getLedgersRootPath()); + } + + protected void setUp(String ledgersRootPath) throws Exception { + LOG.info("Setting up test {}", getClass()); + InMemoryMetaStore.reset(); + setMetastoreImplClass(baseConf); + setMetastoreImplClass(baseClientConf); + executor = Executors.newCachedThreadPool(); + + Stopwatch sw = Stopwatch.createStarted(); + try { + // start zookeeper service + startZKCluster(); + // start bookkeeper service + this.metadataServiceUri = getMetadataServiceUri(ledgersRootPath); + startBKCluster(metadataServiceUri); + LOG.info("Setup testcase {} @ metadata service {} in {} ms.", + testName, metadataServiceUri, sw.elapsed(TimeUnit.MILLISECONDS)); + } catch (Exception e) { + LOG.error("Error setting up", e); + throw e; + } + } + + protected String getMetadataServiceUri(String ledgersRootPath) { + return zkUtil.getMetadataServiceUri(ledgersRootPath); + } + + private String getLedgersRootPath() { + return changeLedgerPath() + "/ledgers"; + } + + protected String changeLedgerPath() { + return ""; + } + + @AfterTest(alwaysRun = true) + public void tearDown() throws Exception { + boolean failed = false; + for (Throwable e : asyncExceptions) { + LOG.error("Got async exception: ", e); + failed = true; + } + assertFalse(failed, "Async failure"); + Stopwatch sw = Stopwatch.createStarted(); + LOG.info("TearDown"); + Exception tearDownException = null; + // stop bookkeeper service + try { + stopBKCluster(); + } catch (Exception e) { + LOG.error("Got Exception while trying to stop BKCluster", e); + tearDownException = e; + } + // stop zookeeper service + try { + // cleanup for metrics. + metadataStore.close(); + stopZKCluster(); + } catch (Exception e) { + LOG.error("Got Exception while trying to stop ZKCluster", e); + tearDownException = e; + } + // cleanup temp dirs + try { + tmpDirs.cleanup(); + } catch (Exception e) { + LOG.error("Got Exception while trying to cleanupTempDirs", e); + tearDownException = e; + } + + executor.shutdownNow(); + + LOG.info("Tearing down test {} in {} ms.", testName, sw.elapsed(TimeUnit.MILLISECONDS)); + if (tearDownException != null) { + throw tearDownException; + } + } + + /** + * Start zookeeper cluster. 
+ * + * @throws Exception + */ + protected void startZKCluster() throws Exception { + zkUtil.startCluster(); + zkc = zkUtil.getZooKeeperClient(); + metadataStore = new FaultInjectionMetadataStore( + MetadataStoreExtended.create(zkUtil.getZooKeeperConnectString(), + MetadataStoreConfig.builder().build())); + } + + /** + * Stop zookeeper cluster. + * + * @throws Exception + */ + protected void stopZKCluster() throws Exception { + zkUtil.killCluster(); + } + + /** + * Start cluster. Also, starts the auto recovery process for each bookie, if + * isAutoRecoveryEnabled is true. + * + * @throws Exception + */ + protected void startBKCluster(String metadataServiceUri) throws Exception { + baseConf.setMetadataServiceUri(metadataServiceUri); + baseClientConf.setMetadataServiceUri(metadataServiceUri); + baseClientConf.setAllocatorPoolingPolicy(PoolingPolicy.UnpooledHeap); + + if (numBookies > 0) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + + // Create Bookie Servers (B1, B2, B3) + for (int i = 0; i < numBookies; i++) { + bookiePorts.add(startNewBookie()); + } + } + + /** + * Stop cluster. Also, stops all the auto recovery processes for the bookie + * cluster, if isAutoRecoveryEnabled is true. + * + * @throws Exception + */ + protected void stopBKCluster() throws Exception { + if (bkc != null) { + bkc.close(); + } + + for (ServerTester t : servers) { + t.shutdown(); + } + servers.clear(); + bookiePorts.removeIf(PortManager::releaseLockedPort); + } + + protected ServerConfiguration newServerConfiguration() throws Exception { + File f = tmpDirs.createNew("bookie", "test"); + + int port; + if (baseConf.isEnableLocalTransport() || !baseConf.getAllowEphemeralPorts()) { + port = nextLockedFreePort(); + } else { + port = 0; + } + return newServerConfiguration(port, f, new File[] { f }); + } + + protected ClientConfiguration newClientConfiguration() { + return new ClientConfiguration(baseConf); + } + + protected ServerConfiguration newServerConfiguration(int port, File journalDir, File[] ledgerDirs) { + ServerConfiguration conf = new ServerConfiguration(baseConf); + conf.setBookiePort(port); + conf.setJournalDirName(journalDir.getPath()); + String[] ledgerDirNames = new String[ledgerDirs.length]; + for (int i = 0; i < ledgerDirs.length; i++) { + ledgerDirNames[i] = ledgerDirs[i].getPath(); + } + conf.setLedgerDirNames(ledgerDirNames); + conf.setEnableTaskExecutionStats(true); + conf.setAllocatorPoolingPolicy(PoolingPolicy.UnpooledHeap); + return conf; + } + + protected void stopAllBookies() throws Exception { + stopAllBookies(true); + } + + protected void stopAllBookies(boolean shutdownClient) throws Exception { + for (ServerTester t : servers) { + t.shutdown(); + } + servers.clear(); + if (shutdownClient && bkc != null) { + bkc.close(); + bkc = null; + } + } + + protected String newMetadataServiceUri(String ledgersRootPath) { + return zkUtil.getMetadataServiceUri(ledgersRootPath); + } + + protected String newMetadataServiceUri(String ledgersRootPath, String type) { + return zkUtil.getMetadataServiceUri(ledgersRootPath, type); + } + + /** + * Get bookie address for bookie at index. 
+ */ + public BookieId getBookie(int index) throws Exception { + return servers.get(index).getServer().getBookieId(); + } + + protected List bookieAddresses() throws Exception { + List bookieIds = new ArrayList<>(); + for (ServerTester a : servers) { + bookieIds.add(a.getServer().getBookieId()); + } + return bookieIds; + } + + protected List bookieLedgerDirs() throws Exception { + return servers.stream() + .flatMap(t -> Arrays.stream(t.getConfiguration().getLedgerDirs())) + .collect(Collectors.toList()); + } + + protected List bookieJournalDirs() throws Exception { + return servers.stream() + .flatMap(t -> Arrays.stream(t.getConfiguration().getJournalDirs())) + .collect(Collectors.toList()); + } + + protected BookieId addressByIndex(int index) throws Exception { + return servers.get(index).getServer().getBookieId(); + } + + protected BookieServer serverByIndex(int index) throws Exception { + return servers.get(index).getServer(); + } + + protected ServerConfiguration confByIndex(int index) throws Exception { + return servers.get(index).getConfiguration(); + } + + private Optional byAddress(BookieId addr) throws UnknownHostException { + for (ServerTester s : servers) { + if (s.getServer().getBookieId().equals(addr)) { + return Optional.of(s); + } + } + return Optional.empty(); + } + + protected int indexOfServer(BookieServer b) throws Exception { + for (int i = 0; i < servers.size(); i++) { + if (servers.get(i).getServer().equals(b)) { + return i; + } + } + return -1; + } + + protected int lastBookieIndex() { + return servers.size() - 1; + } + + protected int bookieCount() { + return servers.size(); + } + + private OptionalInt indexByAddress(BookieId addr) throws UnknownHostException { + for (int i = 0; i < servers.size(); i++) { + if (addr.equals(servers.get(i).getServer().getBookieId())) { + return OptionalInt.of(i); + } + } + return OptionalInt.empty(); + } + + /** + * Get bookie configuration for bookie. + */ + public ServerConfiguration getBkConf(BookieId addr) throws Exception { + return byAddress(addr).get().getConfiguration(); + } + + /** + * Kill a bookie by its socket address. Also, stops the autorecovery process + * for the corresponding bookie server, if isAutoRecoveryEnabled is true. + * + * @param addr + * Socket Address + * @return the configuration of killed bookie + * @throws InterruptedException + */ + public ServerConfiguration killBookie(BookieId addr) throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + if (tester.get().autoRecovery != null + && tester.get().autoRecovery.getAuditor() != null + && tester.get().autoRecovery.getAuditor().isRunning()) { + LOG.warn("Killing bookie {} who is the current Auditor", addr); + } + servers.remove(tester.get()); + tester.get().shutdown(); + return tester.get().getConfiguration(); + } + return null; + } + + /** + * Set the bookie identified by its socket address to readonly. + * + * @param addr + * Socket Address + * @throws InterruptedException + */ + public void setBookieToReadOnly(BookieId addr) throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + tester.get().getServer().getBookie().getStateManager().transitionToReadOnlyMode().get(); + } + } + + /** + * Kill a bookie by index. Also, stops the respective auto recovery process + * for this bookie, if isAutoRecoveryEnabled is true. 
+ * + * @param index + * Bookie Index + * @return the configuration of killed bookie + * @throws InterruptedException + * @throws IOException + */ + public ServerConfiguration killBookie(int index) throws Exception { + ServerTester tester = servers.remove(index); + tester.shutdown(); + return tester.getConfiguration(); + } + + /** + * Kill bookie by index and verify that it's stopped. + * + * @param index index of bookie to kill + * + * @return configuration of killed bookie + */ + public ServerConfiguration killBookieAndWaitForZK(int index) throws Exception { + ServerTester tester = servers.get(index); // IKTODO: this method is awful + ServerConfiguration ret = killBookie(index); + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + + tester.getServer().getBookieId().toString(), false) != null) { + Thread.sleep(500); + } + return ret; + } + + /** + * Sleep a bookie. + * + * @param addr + * Socket Address + * @param seconds + * Sleep seconds + * @return Count Down latch which will be counted down just after sleep begins + * @throws InterruptedException + * @throws IOException + */ + public CountDownLatch sleepBookie(BookieId addr, final int seconds) + throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + CountDownLatch latch = new CountDownLatch(1); + Thread sleeper = new Thread() { + @Override + public void run() { + try { + tester.get().getServer().suspendProcessing(); + LOG.info("bookie {} is asleep", tester.get().getAddress()); + latch.countDown(); + Thread.sleep(seconds * 1000); + tester.get().getServer().resumeProcessing(); + LOG.info("bookie {} is awake", tester.get().getAddress()); + } catch (Exception e) { + LOG.error("Error suspending bookie", e); + } + } + }; + sleeper.start(); + return latch; + } else { + throw new IOException("Bookie not found"); + } + } + + /** + * Sleep a bookie until I count down the latch. + * + * @param addr + * Socket Address + * @param l + * Latch to wait on + * @throws InterruptedException + * @throws IOException + */ + public void sleepBookie(BookieId addr, final CountDownLatch l) + throws InterruptedException, IOException { + final CountDownLatch suspendLatch = new CountDownLatch(1); + sleepBookie(addr, l, suspendLatch); + suspendLatch.await(); + } + + public void sleepBookie(BookieId addr, final CountDownLatch l, final CountDownLatch suspendLatch) + throws InterruptedException, IOException { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + BookieServer bookie = tester.get().getServer(); + LOG.info("Sleep bookie {}.", addr); + Thread sleeper = new Thread() { + @Override + public void run() { + try { + bookie.suspendProcessing(); + if (null != suspendLatch) { + suspendLatch.countDown(); + } + l.await(); + bookie.resumeProcessing(); + } catch (Exception e) { + LOG.error("Error suspending bookie", e); + } + } + }; + sleeper.start(); + } else { + throw new IOException("Bookie not found"); + } + } + + /** + * Restart bookie servers. Also restarts all the respective auto recovery + * process, if isAutoRecoveryEnabled is true. + * + * @throws InterruptedException + * @throws IOException + * @throws KeeperException + * @throws BookieException + */ + public void restartBookies() + throws Exception { + restartBookies(c -> c); + } + + /** + * Restart a bookie. Also restart the respective auto recovery process, + * if isAutoRecoveryEnabled is true. 
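The sleepBookie helpers above suspend request processing on one bookie rather than killing it. A hedged usage sketch: the index and duration are arbitrary, and the surrounding test method is assumed to declare throws Exception.

```java
// Suspend one replica for five seconds; the returned latch fires once the bookie is actually asleep.
CountDownLatch asleep = sleepBookie(getBookie(0), 5);
asleep.await();
// ... exercise reads/writes against the degraded ensemble here ...
```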
+ * + * @param addr + * @throws InterruptedException + * @throws IOException + * @throws KeeperException + * @throws BookieException + */ + public void restartBookie(BookieId addr) throws Exception { + OptionalInt toRemove = indexByAddress(addr); + if (toRemove.isPresent()) { + ServerConfiguration newConfig = killBookie(toRemove.getAsInt()); + Thread.sleep(1000); + startAndAddBookie(newConfig); + } else { + throw new IOException("Bookie not found"); + } + } + + public void restartBookies(Function reconfFunction) + throws Exception { + // shut down bookie server + List confs = new ArrayList<>(); + for (ServerTester server : servers) { + server.shutdown(); + confs.add(server.getConfiguration()); + } + servers.clear(); + Thread.sleep(1000); + // restart them to ensure we can't + for (ServerConfiguration conf : confs) { + // ensure the bookie port is loaded correctly + startAndAddBookie(reconfFunction.apply(conf)); + } + } + + /** + * Helper method to startup a new bookie server with the indicated port + * number. Also, starts the auto recovery process, if the + * isAutoRecoveryEnabled is set true. + * + * @throws IOException + */ + public int startNewBookie() + throws Exception { + return startNewBookieAndReturnAddress().getPort(); + } + + public BookieSocketAddress startNewBookieAndReturnAddress() + throws Exception { + ServerConfiguration conf = newServerConfiguration(); + LOG.info("Starting new bookie on port: {}", conf.getBookiePort()); + return startAndAddBookie(conf).getServer().getLocalAddress(); + } + + public BookieId startNewBookieAndReturnBookieId() + throws Exception { + ServerConfiguration conf = newServerConfiguration(); + LOG.info("Starting new bookie on port: {}", conf.getBookiePort()); + return startAndAddBookie(conf).getServer().getBookieId(); + } + + protected ServerTester startAndAddBookie(ServerConfiguration conf) throws Exception { + ServerTester server = startBookie(conf); + servers.add(server); + return server; + } + + protected ServerTester startAndAddBookie(ServerConfiguration conf, Bookie b) throws Exception { + ServerTester server = startBookie(conf, b); + servers.add(server); + return server; + } + /** + * Helper method to startup a bookie server using a configuration object. + * Also, starts the auto recovery process if isAutoRecoveryEnabled is true. + * + * @param conf + * Server Configuration Object + * + */ + protected ServerTester startBookie(ServerConfiguration conf) + throws Exception { + ServerTester tester = new ServerTester(conf); + + if (bkc == null) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + + BookieId address = tester.getServer().getBookieId(); + Future waitForBookie = conf.isForceReadOnlyBookie() + ? bkc.waitForReadOnlyBookie(address) + : bkc.waitForWritableBookie(address); + + tester.getServer().start(); + + waitForBookie.get(30, TimeUnit.SECONDS); + LOG.info("New bookie '{}' has been created.", address); + + if (isAutoRecoveryEnabled()) { + tester.startAutoRecovery(); + } + + int port = conf.getBookiePort(); + + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> { + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + + tester.getServer().getBookieId().toString(), false) == null) { + Thread.sleep(100); + } + return true; + }); + bkc.readBookiesBlocking(); + + LOG.info("New bookie on port " + port + " has been created."); + + return tester; + } + + /** + * Start a bookie with the given bookie instance. Also, starts the auto + * recovery for this bookie, if isAutoRecoveryEnabled is true. 
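The restartBookies overload above accepts a reconfiguration hook that receives each bookie's previous ServerConfiguration and returns the configuration to restart it with. A small sketch; the particular setter is simply one that appears later in this patch.

```java
// Restart every bookie with a tweaked ServerConfiguration.
restartBookies(conf -> {
    conf.setUseHostNameAsBookieID(true); // same setter the recovery tests below use
    return conf;
});
```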
+ */ + protected ServerTester startBookie(ServerConfiguration conf, final Bookie b) + throws Exception { + ServerTester tester = new ServerTester(conf, b); + if (bkc == null) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + BookieId address = tester.getServer().getBookieId(); + Future waitForBookie = conf.isForceReadOnlyBookie() + ? bkc.waitForReadOnlyBookie(address) + : bkc.waitForWritableBookie(address); + + tester.getServer().start(); + + waitForBookie.get(30, TimeUnit.SECONDS); + + if (isAutoRecoveryEnabled()) { + tester.startAutoRecovery(); + } + + int port = conf.getBookiePort(); + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> + metadataStore.exists( + getLedgersRootPath() + "/available/" + address).join() + ); + bkc.readBookiesBlocking(); + + LOG.info("New bookie '{}' has been created.", address); + return tester; + } + + public void setMetastoreImplClass(AbstractConfiguration conf) { + conf.setMetastoreImplClass(InMemoryMetaStore.class.getName()); + } + + /** + * Flags used to enable/disable the auto recovery process. If it is enabled, + * starting the bookie server will starts the auto recovery process for that + * bookie. Also, stopping bookie will stops the respective auto recovery + * process. + * + * @param isAutoRecoveryEnabled + * Value true will enable the auto recovery process. Value false + * will disable the auto recovery process + */ + public void setAutoRecoveryEnabled(boolean isAutoRecoveryEnabled) { + this.isAutoRecoveryEnabled = isAutoRecoveryEnabled; + } + + /** + * Flag used to check whether auto recovery process is enabled/disabled. By + * default the flag is false. + * + * @return true, if the auto recovery is enabled. Otherwise return false. + */ + public boolean isAutoRecoveryEnabled() { + return isAutoRecoveryEnabled; + } + + /** + * Will starts the auto recovery process for the bookie servers. One auto + * recovery process per each bookie server, if isAutoRecoveryEnabled is + * enabled. + */ + public void startReplicationService() throws Exception { + for (ServerTester t : servers) { + t.startAutoRecovery(); + } + } + + /** + * Will stops all the auto recovery processes for the bookie cluster, if + * isAutoRecoveryEnabled is true. + */ + public void stopReplicationService() throws Exception{ + for (ServerTester t : servers) { + t.stopAutoRecovery(); + } + } + + public Auditor getAuditor(int timeout, TimeUnit unit) throws Exception { + final long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, unit); + while (System.nanoTime() < timeoutAt) { + for (ServerTester t : servers) { + Auditor a = t.getAuditor(); + ReplicationWorker replicationWorker = t.getReplicationWorker(); + + // found a candidate Auditor + ReplicationWorker + if (a != null && a.isRunning() + && replicationWorker != null && replicationWorker.isRunning()) { + int deathWatchInterval = t.getConfiguration().getDeathWatchInterval(); + Thread.sleep(deathWatchInterval + 1000); + } + + // double check, because in the meantime AutoRecoveryDeathWatcher may have killed the + // AutoRecovery daemon + if (a != null && a.isRunning() + && replicationWorker != null && replicationWorker.isRunning()) { + LOG.info("Found Auditor Bookie {}", t.getServer().getBookieId()); + return a; + } + } + Thread.sleep(100); + } + throw new Exception("No auditor found"); + } + + /** + * Check whether the InetSocketAddress was created using a hostname or an IP + * address. 
Represent as 'hostname/IPaddress' if the InetSocketAddress was + * created using hostname. Represent as '/IPaddress' if the + * InetSocketAddress was created using an IPaddress + * + * @param bookieId id + * @return true if the address was created using an IP address, false if the + * address was created using a hostname + */ + public boolean isCreatedFromIp(BookieId bookieId) { + BookieSocketAddress addr = bkc.getBookieAddressResolver().resolve(bookieId); + return addr.getSocketAddress().toString().startsWith("/"); + } + + public void resetBookieOpLoggers() { + servers.forEach(t -> t.getStatsProvider().clear()); + } + + public TestStatsProvider getStatsProvider(BookieId addr) throws UnknownHostException { + return byAddress(addr).get().getStatsProvider(); + } + + public TestStatsProvider getStatsProvider(int index) throws Exception { + return servers.get(index).getStatsProvider(); + } + +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java new file mode 100644 index 0000000000000..888303d3e665c --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java @@ -0,0 +1,651 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNull; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.SortedMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.Watcher.Event.EventType; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Integration tests verifies the complete functionality of the + * Auditor-rereplication process: Auditor will publish the bookie failures, + * consequently ReplicationWorker will get the notifications and act on it. 
+ */ +public class BookieAutoRecoveryTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(BookieAutoRecoveryTest.class); + private static final byte[] PASSWD = "admin".getBytes(); + private static final byte[] data = "TESTDATA".getBytes(); + private static final String openLedgerRereplicationGracePeriod = "3000"; // milliseconds + + private DigestType digestType; + private MetadataClientDriver metadataClientDriver; + private LedgerManagerFactory mFactory; + private LedgerUnderreplicationManager underReplicationManager; + private LedgerManager ledgerManager; + private OrderedScheduler scheduler; + + private final String underreplicatedPath = "/ledgers/underreplication/ledgers"; + + public BookieAutoRecoveryTest() throws Exception { + super(3); + + baseConf.setLedgerManagerFactoryClassName( + "org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + baseConf.setOpenLedgerRereplicationGracePeriod(openLedgerRereplicationGracePeriod); + baseConf.setRwRereplicateBackoffMs(500); + baseClientConf.setLedgerManagerFactoryClassName( + "org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + this.digestType = DigestType.MAC; + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + + scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + + metadataClientDriver = MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + metadataClientDriver.initialize( + baseClientConf, + scheduler, + NullStatsLogger.INSTANCE, + Optional.empty()); + + // initialize urReplicationManager + mFactory = metadataClientDriver.getLedgerManagerFactory(); + underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + ledgerManager = mFactory.newLedgerManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + + if (null != underReplicationManager) { + underReplicationManager.close(); + underReplicationManager = null; + } + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + if (null != metadataClientDriver) { + metadataClientDriver.close(); + metadataClientDriver = null; + } + if (null != scheduler) { + scheduler.shutdown(); + } + } + + /** + * Test verifies publish urLedger by Auditor and replication worker is + * picking up the entries and finishing the rereplication of open ledger. 
+ */ + @Test + public void testOpenLedgers() throws Exception { + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + BookieId replicaToKillAddr = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + } + + /** + * Test verifies publish urLedger by Auditor and replication worker is + * picking up the entries and finishing the rereplication of closed ledgers. + */ + @Test + public void testClosedLedgers() throws Exception { + List listOfReplicaIndex = new ArrayList(); + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + closeLedgers(listOfLedgerHandle); + LedgerHandle lhandle = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + BookieId replicaToKillAddr = lhandle.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + listOfReplicaIndex.add(ledgerReplicaIndex); + assertNull("UrLedger already exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + + // Again watching the urLedger znode to know the replication status + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + String urLedgerZNode = getUrLedgerZNode(lh); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + } + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + + // waiting to finish replication + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + for 
(int index = 0; index < listOfLedgerHandle.size(); index++) { + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(index), + listOfReplicaIndex.get(index)); + } + } + + /** + * Test stopping replica service while replication in progress. Considering + * when there is an exception will shutdown Auditor and RW processes. After + * restarting should be able to finish the re-replication activities + */ + @Test + public void testStopWhileReplicationInProgress() throws Exception { + int numberOfLedgers = 2; + List listOfReplicaIndex = new ArrayList(); + List listOfLedgerHandle = createLedgersAndAddEntries( + numberOfLedgers, 5); + closeLedgers(listOfLedgerHandle); + LedgerHandle handle = listOfLedgerHandle.get(0); + BookieId replicaToKillAddr = handle.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie:" + replicaToKillAddr); + + // Each ledger, there will be two events : create urLedger and after + // rereplication delete urLedger + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (int i = 0; i < listOfLedgerHandle.size(); i++) { + final String urLedgerZNode = getUrLedgerZNode(listOfLedgerHandle + .get(i)); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + int replicaIndexInLedger = getReplicaIndexInLedger( + listOfLedgerHandle.get(i), replicaToKillAddr); + listOfReplicaIndex.add(replicaIndexInLedger); + } + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + + // Again watching the urLedger znode to know the replication status + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + String urLedgerZNode = getUrLedgerZNode(lh); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + } + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + while (true) { + if (latch.getCount() < numberOfLedgers || latch.getCount() <= 0) { + stopReplicationService(); + LOG.info("Latch Count is:" + latch.getCount()); + break; + } + // grace period to take breath + Thread.sleep(1000); + } + + startReplicationService(); + + LOG.info("Waiting to finish rereplication processes"); + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + for (int index = 0; index < listOfLedgerHandle.size(); index++) { + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(index), + listOfReplicaIndex.get(index)); + } + } + + /** + * Verify the published urledgers of deleted ledgers(those ledgers where + * deleted after publishing as urledgers by Auditor) should be cleared off + * by the newly selected replica bookie. 
+ */ + @Test + public void testNoSuchLedgerExists() throws Exception { + List listOfLedgerHandle = createLedgersAndAddEntries(2, 5); + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + assertNull("UrLedger already exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + BookieId replicaToKillAddr = listOfLedgerHandle.get(0) + .getLedgerMetadata().getAllEnsembles() + .get(0L).get(0); + killBookie(replicaToKillAddr); + replicaToKillAddr = listOfLedgerHandle.get(0) + .getLedgerMetadata().getAllEnsembles() + .get(0L).get(0); + killBookie(replicaToKillAddr); + // waiting to publish urLedger znode by Auditor + latch.await(); + + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + + // delete ledgers + for (LedgerHandle lh : listOfLedgerHandle) { + bkc.deleteLedger(lh.getId()); + } + startNewBookie(); + + // waiting to delete published urledgers, since it doesn't exists + latch.await(); + + for (LedgerHandle lh : listOfLedgerHandle) { + assertNull("UrLedger still exists after rereplication", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + } + + /** + * Test that if a empty ledger loses the bookie not in the quorum for entry 0, it will + * still be openable when it loses enough bookies to lose a whole quorum. + */ + @Test + public void testEmptyLedgerLosesQuorumEventually() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 2, 2, DigestType.CRC32, PASSWD); + CountDownLatch latch = new CountDownLatch(1); + String urZNode = getUrLedgerZNode(lh); + watchUrLedgerNode(urZNode, latch); + + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(2); + LOG.info("Killing last bookie, {}, in ensemble {}", replicaToKill, + lh.getLedgerMetadata().getAllEnsembles().get(0L)); + killBookie(replicaToKill); + startNewBookie(); + + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + assertTrue("Should be marked as underreplicated", latch.await(5, TimeUnit.SECONDS)); + latch = new CountDownLatch(1); + Stat s = watchUrLedgerNode(urZNode, latch); // should be marked as replicated + if (s != null) { + assertTrue("Should be marked as replicated", latch.await(15, TimeUnit.SECONDS)); + } + + replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(1); + LOG.info("Killing second bookie, {}, in ensemble {}", replicaToKill, + lh.getLedgerMetadata().getAllEnsembles().get(0L)); + killBookie(replicaToKill); + + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + assertTrue("Should be marked as underreplicated", latch.await(5, TimeUnit.SECONDS)); + latch = new CountDownLatch(1); + s = watchUrLedgerNode(urZNode, latch); // should be marked as replicated + + startNewBookie(); + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + if (s != null) { + assertTrue("Should be marked as replicated", latch.await(20, TimeUnit.SECONDS)); + } + + // should be able to open ledger without issue + bkc.openLedger(lh.getId(), DigestType.CRC32, PASSWD); + } + + /** + * Test verifies bookie recovery, the host (recorded via ipaddress in + * ledgermetadata). 
+ */ + @Test + public void testLedgerMetadataContainsIpAddressAsBookieID() + throws Exception { + stopBKCluster(); + bkc = new BookKeeperTestClient(baseClientConf); + // start bookie with useHostNameAsBookieID=false, as old bookie + ServerConfiguration serverConf1 = newServerConfiguration(); + // start 2 more bookies with useHostNameAsBookieID=true + ServerConfiguration serverConf2 = newServerConfiguration(); + serverConf2.setUseHostNameAsBookieID(true); + ServerConfiguration serverConf3 = newServerConfiguration(); + serverConf3.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf1); + startAndAddBookie(serverConf2); + startAndAddBookie(serverConf3); + + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + final SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + final List bkAddresses = ensembles.get(0L); + BookieId replicaToKillAddr = bkAddresses.get(0); + for (BookieId bookieSocketAddress : bkAddresses) { + if (!isCreatedFromIp(bookieSocketAddress)) { + replicaToKillAddr = bookieSocketAddress; + LOG.info("Kill bookie which has registered using hostname"); + break; + } + } + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // starting the replication service, so that he will be able to act as + // target bookie + ServerConfiguration serverConf = newServerConfiguration(); + serverConf.setUseHostNameAsBookieID(false); + startAndAddBookie(serverConf); + + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + + } + + /** + * Test verifies bookie recovery, the host (recorded via useHostName in + * ledgermetadata). 
+ */ + @Test + public void testLedgerMetadataContainsHostNameAsBookieID() + throws Exception { + stopBKCluster(); + + bkc = new BookKeeperTestClient(baseClientConf); + // start bookie with useHostNameAsBookieID=false, as old bookie + ServerConfiguration serverConf1 = newServerConfiguration(); + // start 2 more bookies with useHostNameAsBookieID=true + ServerConfiguration serverConf2 = newServerConfiguration(); + serverConf2.setUseHostNameAsBookieID(true); + ServerConfiguration serverConf3 = newServerConfiguration(); + serverConf3.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf1); + startAndAddBookie(serverConf2); + startAndAddBookie(serverConf3); + + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + final SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + final List bkAddresses = ensembles.get(0L); + BookieId replicaToKillAddr = bkAddresses.get(0); + for (BookieId bookieSocketAddress : bkAddresses) { + if (isCreatedFromIp(bookieSocketAddress)) { + replicaToKillAddr = bookieSocketAddress; + LOG.info("Kill bookie which has registered using ipaddress"); + break; + } + } + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // creates new bkclient + bkc = new BookKeeperTestClient(baseClientConf); + // starting the replication service, so that he will be able to act as + // target bookie + ServerConfiguration serverConf = newServerConfiguration(); + serverConf.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf); + + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + + } + + private int getReplicaIndexInLedger(LedgerHandle lh, BookieId replicaToKill) { + SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + int ledgerReplicaIndex = -1; + for (BookieId addr : ensembles.get(0L)) { + ++ledgerReplicaIndex; + if (addr.equals(replicaToKill)) { + break; + } + } + return ledgerReplicaIndex; + } + + private void verifyLedgerEnsembleMetadataAfterReplication( + BookieServer newBookieServer, LedgerHandle lh, + int ledgerReplicaIndex) throws Exception { + LedgerHandle openLedger = bkc + .openLedger(lh.getId(), digestType, PASSWD); + + BookieId inetSocketAddress = openLedger.getLedgerMetadata().getAllEnsembles().get(0L) + .get(ledgerReplicaIndex); + assertEquals("Rereplication has been failed and ledgerReplicaIndex :" + + ledgerReplicaIndex, newBookieServer.getBookieId(), + inetSocketAddress); + openLedger.close(); + } + + private void 
closeLedgers(List listOfLedgerHandle) + throws InterruptedException, BKException { + for (LedgerHandle lh : listOfLedgerHandle) { + lh.close(); + } + } + + private List createLedgersAndAddEntries(int numberOfLedgers, + int numberOfEntries) + throws InterruptedException, BKException { + List listOfLedgerHandle = new ArrayList( + numberOfLedgers); + for (int index = 0; index < numberOfLedgers; index++) { + LedgerHandle lh = bkc.createLedger(3, 3, digestType, PASSWD); + listOfLedgerHandle.add(lh); + for (int i = 0; i < numberOfEntries; i++) { + lh.addEntry(data); + } + } + return listOfLedgerHandle; + } + + private String getUrLedgerZNode(LedgerHandle lh) { + return ZkLedgerUnderreplicationManager.getUrLedgerZnode( + underreplicatedPath, lh.getId()); + } + + private Stat watchUrLedgerNode(final String znode, + final CountDownLatch latch) throws KeeperException, + InterruptedException { + return zkc.exists(znode, new Watcher() { + @Override + public void process(WatchedEvent event) { + if (event.getType() == EventType.NodeDeleted) { + LOG.info("Received Ledger rereplication completion event :" + + event.getType()); + latch.countDown(); + } + if (event.getType() == EventType.NodeCreated) { + LOG.info("Received urLedger publishing event :" + + event.getType()); + latch.countDown(); + } + } + }); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java new file mode 100644 index 0000000000000..1d5cf868cce65 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests verifies bookie vs ledger mapping generating by the BookieLedgerIndexer. + */ +public class BookieLedgerIndexTest extends BookKeeperClusterTestCase { + + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // private final static Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(BookieLedgerIndexTest.class); + + private Random rng; // Random Number Generator + private ArrayList entries; // generated entries + private final DigestType digestType = DigestType.CRC32; + private int numberOfLedgers = 3; + private List ledgerList; + private LedgerManagerFactory newLedgerManagerFactory; + private LedgerManager ledgerManager; + + public BookieLedgerIndexTest() throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + BookieLedgerIndexTest(String ledgerManagerFactory) throws Exception { + super(3); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactory); + // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + public void setUp() throws Exception { + super.setUp(); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + rng = new Random(System.currentTimeMillis()); // Initialize the Random + // Number Generator + entries = new ArrayList(); // initialize the entries list + ledgerList = new ArrayList(3); + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = MetadataStoreExtended.create(storeUri, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + newLedgerManagerFactory = new PulsarLedgerManagerFactory(); + + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + newLedgerManagerFactory.initialize(conf, 
layoutManager, 1); + ledgerManager = newLedgerManagerFactory.newLedgerManager(); + } + + @AfterMethod + public void tearDown() throws Exception { + super.tearDown(); + if (null != newLedgerManagerFactory) { + newLedgerManagerFactory.close(); + newLedgerManagerFactory = null; + } + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + } + + /** + * Verify the bookie-ledger mapping with minimum number of bookies and few + * ledgers. + */ + @Test + public void testSimpleBookieLedgerMapping() throws Exception { + + for (int i = 0; i < numberOfLedgers; i++) { + createAndAddEntriesToLedger().close(); + } + + BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( + ledgerManager); + + Map> bookieToLedgerIndex = bookieLedgerIndex + .getBookieToLedgerIndex(); + + assertEquals("Missed few bookies in the bookie-ledger mapping!", 3, + bookieToLedgerIndex.size()); + Collection> bk2ledgerEntry = bookieToLedgerIndex.values(); + for (Set ledgers : bk2ledgerEntry) { + assertEquals("Missed few ledgers in the bookie-ledger mapping!", 3, + ledgers.size()); + for (Long ledgerId : ledgers) { + assertTrue("Unknown ledger-bookie mapping", ledgerList + .contains(ledgerId)); + } + } + } + + /** + * Verify ledger index with failed bookies and throws exception. + */ + @SuppressWarnings("deprecation") +// @Test +// public void testWithoutZookeeper() throws Exception { +// // This test case is for ledger metadata that stored in ZooKeeper. As +// // far as MSLedgerManagerFactory, ledger metadata are stored in other +// // storage. So this test is not suitable for MSLedgerManagerFactory. +// if (newLedgerManagerFactory instanceof org.apache.bookkeeper.meta.MSLedgerManagerFactory) { +// return; +// } +// +// for (int i = 0; i < numberOfLedgers; i++) { +// createAndAddEntriesToLedger().close(); +// } +// +// BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( +// ledgerManager); +// stopZKCluster(); +// try { +// bookieLedgerIndex.getBookieToLedgerIndex(); +// fail("Must throw exception as zookeeper are not running!"); +// } catch (BKAuditException bkAuditException) { +// // expected behaviour +// } +// } + + /** + * Verify indexing with multiple ensemble reformation. 
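For readers of the assertions in testSimpleBookieLedgerMapping above, the structure being checked is a map from bookie id to the set of ledgers that keep at least one fragment on that bookie. A sketch with the generic types spelled out; the example key and values are illustrative.

```java
// Shape of the index returned by BookieLedgerIndexer#getBookieToLedgerIndex().
Map<String, Set<Long>> bookieToLedgerIndex =
        new BookieLedgerIndexer(ledgerManager).getBookieToLedgerIndex();
// e.g. "127.0.0.1:3181" -> {0L, 1L, 2L}   (bookie id -> ledger ids)
```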
+ */ + @Test + public void testEnsembleReformation() throws Exception { + try { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + LedgerHandle lh2 = createAndAddEntriesToLedger(); + + startNewBookie(); + shutdownBookie(lastBookieIndex() - 1); + + // add few more entries after ensemble reformation + for (int i = 0; i < 10; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + + entries.add(entry.array()); + lh1.addEntry(entry.array()); + lh2.addEntry(entry.array()); + } + + BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( + ledgerManager); + + Map> bookieToLedgerIndex = bookieLedgerIndex + .getBookieToLedgerIndex(); + assertEquals("Missed few bookies in the bookie-ledger mapping!", 4, + bookieToLedgerIndex.size()); + Collection> bk2ledgerEntry = bookieToLedgerIndex.values(); + for (Set ledgers : bk2ledgerEntry) { + assertEquals( + "Missed few ledgers in the bookie-ledger mapping!", 2, + ledgers.size()); + for (Long ledgerNode : ledgers) { + assertTrue("Unknown ledger-bookie mapping", ledgerList + .contains(ledgerNode)); + } + } + } catch (BKException e) { + LOG.error("Test failed", e); + fail("Test failed due to BookKeeper exception"); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOG.error("Test failed", e); + fail("Test failed due to interruption"); + } + } + + private void shutdownBookie(int bkShutdownIndex) throws Exception { + killBookie(bkShutdownIndex); + } + + private LedgerHandle createAndAddEntriesToLedger() throws BKException, + InterruptedException { + int numEntriesToWrite = 20; + // Create a ledger + LedgerHandle lh = bkc.createLedger(digestType, "admin".getBytes()); + LOG.info("Ledger ID: " + lh.getId()); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + + entries.add(entry.array()); + lh.addEntry(entry.array()); + } + ledgerList.add(lh.getId()); + return lh; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java new file mode 100644 index 0000000000000..4360bf3254675 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import java.util.List; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; + +/** + * Utility class for replication tests. + */ +public class ReplicationTestUtil { + + /** + * Checks whether ledger is in under-replication. 
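A typical use of the helper above is to poll until a ledger leaves under-replication. The tests later in this patch spin with Thread.sleep; an equivalent Awaitility form (Awaitility and TimeUnit are already imported by those tests, and zkc, lh, and basePath are the fields and variables they use) would be:

```java
// Wait until the ledger is no longer marked under-replicated.
Awaitility.await().atMost(30, TimeUnit.SECONDS).until(
        () -> !ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath));
```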
+ */ + public static boolean isLedgerInUnderReplication(ZooKeeper zkc, long id, + String basePath) throws KeeperException, InterruptedException { + List children; + try { + children = zkc.getChildren(basePath, true); + } catch (KeeperException.NoNodeException nne) { + return false; + } + + boolean isMatched = false; + for (String child : children) { + if (child.startsWith("urL") && child.contains(String.valueOf(id))) { + isMatched = true; + break; + } else { + String path = basePath + '/' + child; + try { + if (zkc.getChildren(path, false).size() > 0) { + isMatched = isLedgerInUnderReplication(zkc, id, path); + } + } catch (KeeperException.NoNodeException nne) { + return false; + } + } + + } + return isMatched; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java new file mode 100644 index 0000000000000..11797c8373715 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.util.Enumeration; +import java.util.List; +import java.util.Map.Entry; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.util.BookKeeperConstants; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test auto recovery. 
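For orientation before the class below: the basePath it assembles in setUp expands to the same under-replication path hard-coded as underreplicatedPath in BookieAutoRecoveryTest above, assuming the usual BookKeeperConstants values.

```java
// "/ledgers" + '/' + "underreplication" + "/ledgers" -> "/ledgers/underreplication/ledgers"
String basePath = BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH + '/'
        + BookKeeperConstants.UNDER_REPLICATION_NODE
        + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH;
```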
+ */ +public class TestAutoRecoveryAlongWithBookieServers extends + BookKeeperClusterTestCase { + + private String basePath = ""; + + public TestAutoRecoveryAlongWithBookieServers() throws Exception { + super(3); + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + basePath = BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + @Override + protected void startBKCluster(String metadataServiceUri) throws Exception { + super.startBKCluster(metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + } + + /** + * Tests that the auto recovery service along with Bookie servers itself. + */ + @Test + public void testAutoRecoveryAlongWithBookieServers() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + "testpasswd".getBytes()); + byte[] testData = "testBuiltAutoRecovery".getBytes(); + + for (int i = 0; i < 10; i++) { + lh.addEntry(testData); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), + basePath)) { + Thread.sleep(100); + } + + // Killing all bookies except newly replicated bookie + for (Entry> entry : + lh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List bookies = entry.getValue(); + for (BookieId bookie : bookies) { + if (bookie.equals(newBkAddr)) { + continue; + } + killBookie(bookie); + } + } + + // Should be able to read the entries from 0-9 + LedgerHandle lhs = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, "testpasswd".getBytes()); + Enumeration entries = lhs.readEntries(0, 9); + assertTrue("Should have the elements", entries.hasMoreElements()); + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + assertEquals("testBuiltAutoRecovery", new String(entry.getEntry())); + } + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java new file mode 100644 index 0000000000000..7938feaba19fe --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java @@ -0,0 +1,1248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.apache.bookkeeper.replication.ReplicationStats.NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION; +import static org.apache.bookkeeper.replication.ReplicationStats.REPLICATION_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNull; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import io.netty.util.HashedWheelTimer; +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.Enumeration; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Optional; +import java.util.TimerTask; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import lombok.Cleanup; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.ClientUtil; +import org.apache.bookkeeper.client.EnsemblePlacementPolicy; +import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.ZoneawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.feature.FeatureProvider; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.DNSToSwitchMapping; +import org.apache.bookkeeper.proto.BookieAddressResolver; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.stats.Counter; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.BookKeeperConstants; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.bookkeeper.zookeeper.ZooKeeperClient; +import org.apache.commons.lang3.mutable.MutableObject; +import 
org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test the ReplicationWroker, where it has to replicate the fragments from + * failed Bookies to given target Bookie. + */ +public class TestReplicationWorker extends BookKeeperClusterTestCase { + + private static final byte[] TESTPASSWD = "testpasswd".getBytes(); + private static final Logger LOG = LoggerFactory + .getLogger(TestReplicationWorker.class); + private String basePath = ""; + private String baseLockPath = ""; + private MetadataBookieDriver driver; + private LedgerManagerFactory mFactory; + private LedgerUnderreplicationManager underReplicationManager; + private LedgerManager ledgerManager; + private static byte[] data = "TestReplicationWorker".getBytes(); + private OrderedScheduler scheduler; + private String zkLedgersRootPath; + + public TestReplicationWorker() throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + TestReplicationWorker(String ledgerManagerFactory) throws Exception { + super(3, 300); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactory); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseConf.setRereplicationEntryBatchSize(3); + baseConf.setZkTimeout(7000); + baseConf.setZkRetryBackoffMaxMs(500); + baseConf.setZkRetryBackoffStartMs(10); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + zkLedgersRootPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf); + basePath = zkLedgersRootPath + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; + baseLockPath = zkLedgersRootPath + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + "/locks"; + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + this.scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + + this.driver = MetadataDrivers.getBookieDriver( + URI.create(baseConf.getMetadataServiceUri())); + this.driver.initialize( + baseConf, + NullStatsLogger.INSTANCE); + // initialize urReplicationManager + mFactory = driver.getLedgerManagerFactory(); + ledgerManager = mFactory.newLedgerManager(); + underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + if (null != 
underReplicationManager) { + underReplicationManager.close(); + underReplicationManager = null; + } + if (null != driver) { + driver.close(); + } + if (null != scheduler) { + scheduler.shutdown(); + scheduler = null; + } + if (null != mFactory) { + mFactory.close(); + } + } + + /** + * Tests that replication worker should replicate the failed bookie + * fragments to target bookie given to the worker. + */ + @Test + public void testRWShouldReplicateFragmentsToTargetBookie() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw.shutdown(); + } + } + + /** + * Tests that replication worker should retry for replication until enough + * bookies available for replication. + */ + @Test + public void testRWShouldRetryUntilThereAreEnoughBksAvailableForReplication() + throws Exception { + LedgerHandle lh = bkc.createLedger(1, 1, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + ServerConfiguration killedBookieConfig = killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr :" + newBkAddr); + + killAllBookies(lh, newBkAddr); + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + int counter = 30; + while (counter-- > 0) { + assertTrue("Expecting that replication should not complete", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)); + Thread.sleep(100); + } + // restart killed bookie + startAndAddBookie(killedBookieConfig); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw.shutdown(); + } + } + + /** + * Tests that replication worker1 should take one fragment replication and + * other replication worker also should compete for the replication. 
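The two tests above wait for re-replication by polling ReplicationTestUtil.isLedgerInUnderReplication() in a Thread.sleep(100) loop. Purely as a hedged sketch (this class already imports Awaitility and TimeUnit; the 30-second bound below is an illustrative assumption, not a value taken from the tests), the same wait can be written as a bounded condition:

    // Sketch: bounded wait for the ledger to leave the under-replication path.
    Awaitility.await()
            .atMost(30, TimeUnit.SECONDS)
            .pollInterval(100, TimeUnit.MILLISECONDS)
            .until(() -> !ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath));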
+ */ + @Test + public void test2RWsShouldCompeteForReplicationOf2FragmentsAndCompleteReplication() + throws Exception { + LedgerHandle lh = bkc.createLedger(2, 2, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + ServerConfiguration killedBookieConfig = killBookie(replicaToKill); + + killAllBookies(lh, null); + // Starte RW1 + BookieId newBkAddr1 = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr1); + ReplicationWorker rw1 = new ReplicationWorker(baseConf); + + // Starte RW2 + BookieId newBkAddr2 = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr2); + ReplicationWorker rw2 = new ReplicationWorker(baseConf); + rw1.start(); + rw2.start(); + + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + int counter = 10; + while (counter-- > 0) { + assertTrue("Expecting that replication should not complete", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)); + Thread.sleep(100); + } + // restart killed bookie + startAndAddBookie(killedBookieConfig); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw1.shutdown(); + rw2.shutdown(); + } + } + + /** + * Tests that Replication worker should clean the leadger under replication + * node of the ledger already deleted. + */ + @Test + public void testRWShouldCleanTheLedgerFromUnderReplicationIfLedgerAlreadyDeleted() + throws Exception { + LedgerHandle lh = bkc.createLedger(2, 2, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + ReplicationWorker rw = new ReplicationWorker(baseConf); + rw.start(); + + try { + bkc.deleteLedger(lh.getId()); // Deleting the ledger + // Also mark ledger as in UnderReplication + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + } finally { + rw.shutdown(); + } + + } + + @Test + public void testMultipleLedgerReplicationWithReplicationWorker() + throws Exception { + // Ledger1 + LedgerHandle lh1 = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh1.addEntry(data); + } + BookieId replicaToKillFromFirstLedger = lh1.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKillFromFirstLedger); + + // Ledger2 + LedgerHandle lh2 = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh2.addEntry(data); + } + BookieId replicaToKillFromSecondLedger = lh2.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKillFromSecondLedger); + + // Kill ledger1 + killBookie(replicaToKillFromFirstLedger); + lh1.close(); + // Kill ledger2 + 
killBookie(replicaToKillFromFirstLedger); + lh2.close(); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + + // Mark ledger1 and 2 as underreplicated + underReplicationManager.markLedgerUnderreplicated(lh1.getId(), + replicaToKillFromFirstLedger.toString()); + underReplicationManager.markLedgerUnderreplicated(lh2.getId(), + replicaToKillFromSecondLedger.toString()); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh1 + .getId(), basePath)) { + Thread.sleep(100); + } + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh2 + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh1, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh1, 0, 9); + verifyRecoveredLedgers(lh2, 0, 9); + } finally { + rw.shutdown(); + } + + } + + /** + * Tests that ReplicationWorker should fence the ledger and release ledger + * lock after timeout. Then replication should happen normally. + */ + @Test + public void testRWShouldReplicateTheLedgersAfterTimeoutIfLastFragmentIsUR() + throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + // set to 3s instead of default 30s + baseConf.setOpenLedgerRereplicationGracePeriod("3000"); + ReplicationWorker rw = new ReplicationWorker(baseConf); + + @Cleanup MetadataClientDriver clientDriver = MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory + .newLedgerUnderreplicationManager(); + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + killAllBookies(lh, newBkAddr); + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + lh = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + assertFalse("Ledger must have been closed by RW", ClientUtil + .isLedgerOpen(lh)); + } finally { + rw.shutdown(); + underReplicationManager.close(); + } + + } + + @Test + public void testBookiesNotAvailableScenarioForReplicationWorker() throws Exception { + int ensembleSize = 3; + LedgerHandle lh = bkc.createLedger(ensembleSize, ensembleSize, BookKeeper.DigestType.CRC32, TESTPASSWD); + + int numOfEntries = 7; + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry(data); + } + lh.close(); + + BookieId[] bookiesKilled = new BookieId[ensembleSize]; + ServerConfiguration[] killedBookiesConfig = new ServerConfiguration[ensembleSize]; + + // kill all bookies + for (int i = 0; i < ensembleSize; i++) { + bookiesKilled[i] = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(i); + killedBookiesConfig[i] = getBkConf(bookiesKilled[i]); + LOG.info("Killing Bookie : {}", bookiesKilled[i]); + 
killBookie(bookiesKilled[i]); + } + + // start new bookiesToKill number of bookies + for (int i = 0; i < ensembleSize; i++) { + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + } + + // create couple of replicationworkers + ServerConfiguration newRWConf = new ServerConfiguration(baseConf); + newRWConf.setLockReleaseOfFailedLedgerGracePeriod("64"); + ReplicationWorker rw1 = new ReplicationWorker(newRWConf); + ReplicationWorker rw2 = new ReplicationWorker(newRWConf); + + @Cleanup + MetadataClientDriver clientDriver = MetadataDrivers + .getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + try { + //mark ledger underreplicated + for (int i = 0; i < bookiesKilled.length; i++) { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), bookiesKilled[i].toString()); + } + while (!ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + } + rw1.start(); + rw2.start(); + + AtomicBoolean isBookieRestarted = new AtomicBoolean(false); + + (new Thread(new Runnable() { + @Override + public void run() { + try { + Thread.sleep(3000); + isBookieRestarted.set(true); + /* + * after sleeping for 3000 msecs, restart one of the + * bookie, so that replication can succeed. + */ + startBookie(killedBookiesConfig[0]); + } catch (Exception e) { + e.printStackTrace(); + } + } + })).start(); + + int rw1PrevFailedAttemptsCount = 0; + int rw2PrevFailedAttemptsCount = 0; + while (!isBookieRestarted.get()) { + /* + * since all the bookies containing the ledger entries are down + * replication wouldnt have succeeded. + */ + assertTrue("Ledger: " + lh.getId() + " should be underreplicated", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)); + + // the number of failed attempts should have increased. + int rw1CurFailedAttemptsCount = rw1.replicationFailedLedgers.get(lh.getId()).get(); + assertTrue( + "The current number of failed attempts: " + rw1CurFailedAttemptsCount + + " should be greater than or equal to previous value: " + rw1PrevFailedAttemptsCount, + rw1CurFailedAttemptsCount >= rw1PrevFailedAttemptsCount); + rw1PrevFailedAttemptsCount = rw1CurFailedAttemptsCount; + + int rw2CurFailedAttemptsCount = rw2.replicationFailedLedgers.get(lh.getId()).get(); + assertTrue( + "The current number of failed attempts: " + rw2CurFailedAttemptsCount + + " should be greater than or equal to previous value: " + rw2PrevFailedAttemptsCount, + rw2CurFailedAttemptsCount >= rw2PrevFailedAttemptsCount); + rw2PrevFailedAttemptsCount = rw2CurFailedAttemptsCount; + + Thread.sleep(50); + } + + /** + * since one of the killed bookie is restarted, replicationworker + * should succeed in replicating this under replicated ledger and it + * shouldn't be under replicated anymore. 
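The delayed-restart helper above is declared as an anonymous Runnable. As an illustrative sketch only (same names and behaviour as the test code above, no functional change intended), the same thread can be expressed with a lambda:

    // Sketch: the same delayed bookie restart, written as a lambda.
    new Thread(() -> {
        try {
            Thread.sleep(3000);
            isBookieRestarted.set(true);
            // restart one of the killed bookies so that replication can eventually succeed
            startBookie(killedBookiesConfig[0]);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }).start();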
+ */ + int timeToWaitForReplicationToComplete = 20000; + int timeWaited = 0; + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + timeWaited += 100; + if (timeWaited == timeToWaitForReplicationToComplete) { + fail("Ledger should be replicated by now"); + } + } + + rw1PrevFailedAttemptsCount = rw1.replicationFailedLedgers.get(lh.getId()).get(); + rw2PrevFailedAttemptsCount = rw2.replicationFailedLedgers.get(lh.getId()).get(); + Thread.sleep(2000); + // now since the ledger is replicated, number of failed attempts + // counter shouldn't be increased even after sleeping for sometime. + assertEquals("rw1 failedattempts", rw1PrevFailedAttemptsCount, + rw1.replicationFailedLedgers.get(lh.getId()).get()); + assertEquals("rw2 failed attempts ", rw2PrevFailedAttemptsCount, + rw2.replicationFailedLedgers.get(lh.getId()).get()); + + /* + * Since these entries are eventually available, and replication has + * eventually succeeded, in one of the RW + * unableToReadEntriesForReplication should be 0. + */ + int rw1UnableToReadEntriesForReplication = rw1.unableToReadEntriesForReplication.get(lh.getId()).size(); + int rw2UnableToReadEntriesForReplication = rw2.unableToReadEntriesForReplication.get(lh.getId()).size(); + assertTrue( + "unableToReadEntriesForReplication in RW1: " + rw1UnableToReadEntriesForReplication + + " in RW2: " + + rw2UnableToReadEntriesForReplication, + (rw1UnableToReadEntriesForReplication == 0) + || (rw2UnableToReadEntriesForReplication == 0)); + } finally { + rw1.shutdown(); + rw2.shutdown(); + underReplicationManager.close(); + } + } + + class InjectedReplicationWorker extends ReplicationWorker { + CopyOnWriteArrayList delayReplicationPeriods; + + public InjectedReplicationWorker(ServerConfiguration conf, StatsLogger statsLogger, + CopyOnWriteArrayList delayReplicationPeriods) + throws CompatibilityException, ReplicationException.UnavailableException, + InterruptedException, IOException { + super(conf, statsLogger); + this.delayReplicationPeriods = delayReplicationPeriods; + } + + @Override + protected void scheduleTaskWithDelay(TimerTask timerTask, long delayPeriod) { + delayReplicationPeriods.add(delayPeriod); + super.scheduleTaskWithDelay(timerTask, delayPeriod); + } + } + + @Test + public void testDeferLedgerLockReleaseForReplicationWorker() throws Exception { + int ensembleSize = 3; + LedgerHandle lh = bkc.createLedger(ensembleSize, ensembleSize, BookKeeper.DigestType.CRC32, TESTPASSWD); + int numOfEntries = 7; + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry(data); + } + lh.close(); + + BookieId[] bookiesKilled = new BookieId[ensembleSize]; + ServerConfiguration[] killedBookiesConfig = new ServerConfiguration[ensembleSize]; + + // kill all bookies + for (int i = 0; i < ensembleSize; i++) { + bookiesKilled[i] = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(i); + killedBookiesConfig[i] = getBkConf(bookiesKilled[i]); + LOG.info("Killing Bookie : {}", bookiesKilled[i]); + killBookie(bookiesKilled[i]); + } + + // start new bookiesToKill number of bookies + for (int i = 0; i < ensembleSize; i++) { + startNewBookieAndReturnBookieId(); + } + + // create couple of replicationworkers + long lockReleaseOfFailedLedgerGracePeriod = 64L; + long baseBackoffForLockReleaseOfFailedLedger = lockReleaseOfFailedLedgerGracePeriod + / (int) Math.pow(2, ReplicationWorker.NUM_OF_EXPONENTIAL_BACKOFF_RETRIALS); + ServerConfiguration newRWConf = new ServerConfiguration(baseConf); + 
newRWConf.setLockReleaseOfFailedLedgerGracePeriod(Long.toString(lockReleaseOfFailedLedgerGracePeriod)); + newRWConf.setRereplicationEntryBatchSize(1000); + CopyOnWriteArrayList rw1DelayReplicationPeriods = new CopyOnWriteArrayList(); + CopyOnWriteArrayList rw2DelayReplicationPeriods = new CopyOnWriteArrayList(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger1 = statsProvider.getStatsLogger("rw1"); + TestStatsLogger statsLogger2 = statsProvider.getStatsLogger("rw2"); + ReplicationWorker rw1 = new InjectedReplicationWorker(newRWConf, statsLogger1, rw1DelayReplicationPeriods); + ReplicationWorker rw2 = new InjectedReplicationWorker(newRWConf, statsLogger2, rw2DelayReplicationPeriods); + + Counter numEntriesUnableToReadForReplication1 = statsLogger1 + .getCounter(NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION); + Counter numEntriesUnableToReadForReplication2 = statsLogger2 + .getCounter(NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION); + @Cleanup + MetadataClientDriver clientDriver = MetadataDrivers + .getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + try { + // mark ledger underreplicated + for (int i = 0; i < bookiesKilled.length; i++) { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), bookiesKilled[i].toString()); + } + while (!ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + } + rw1.start(); + rw2.start(); + + // wait for RWs to complete 'numOfAttemptsToWaitFor' failed attempts + int numOfAttemptsToWaitFor = 10; + while ((rw1.replicationFailedLedgers.get(lh.getId()).get() < numOfAttemptsToWaitFor) + || rw2.replicationFailedLedgers.get(lh.getId()).get() < numOfAttemptsToWaitFor) { + Thread.sleep(500); + } + + /* + * since all the bookies containing the ledger entries are down + * replication wouldn't have succeeded. + */ + assertTrue("Ledger: " + lh.getId() + " should be underreplicated", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)); + + /* + * since RW failed 'numOfAttemptsToWaitFor' number of times, we + * should have atleast (numOfAttemptsToWaitFor - 1) + * delayReplicationPeriods and their value should be + * (lockReleaseOfFailedLedgerGracePeriod/16) , 2 * previous value,.. + * with max : lockReleaseOfFailedLedgerGracePeriod + */ + for (int i = 0; i < ((numOfAttemptsToWaitFor - 1)); i++) { + long expectedDelayValue = Math.min(lockReleaseOfFailedLedgerGracePeriod, + baseBackoffForLockReleaseOfFailedLedger * (1 << i)); + assertEquals("RW1 delayperiod", (Long) expectedDelayValue, rw1DelayReplicationPeriods.get(i)); + assertEquals("RW2 delayperiod", (Long) expectedDelayValue, rw2DelayReplicationPeriods.get(i)); + } + + /* + * RW wont try to replicate until and unless RW succeed in reading + * those failed entries before proceeding with replication of under + * replicated fragment, so the numEntriesUnableToReadForReplication + * should be just 'numOfEntries', though RW failed to replicate + * multiple times. 
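The delay assertions above verify a capped exponential backoff: the base delay is lockReleaseOfFailedLedgerGracePeriod divided by 2^NUM_OF_EXPONENTIAL_BACKOFF_RETRIALS, and it doubles on every failed attempt until it is capped at the grace period itself. A small self-contained sketch of that schedule (the concrete numbers are illustrative assumptions, not ReplicationWorker defaults):

    // Sketch: the capped exponential backoff schedule checked by the assertions above.
    static long[] expectedDelays(long gracePeriodMs, int numExponentialBackoffRetrials, int attempts) {
        long base = gracePeriodMs / (long) Math.pow(2, numExponentialBackoffRetrials);
        long[] delays = new long[attempts];
        for (int i = 0; i < attempts; i++) {
            delays[i] = Math.min(gracePeriodMs, base * (1L << i));
        }
        return delays;
    }
    // e.g. expectedDelays(64, 4, 8) -> [4, 8, 16, 32, 64, 64, 64, 64]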
+ */ + assertEquals("numEntriesUnableToReadForReplication for RW1", Long.valueOf((long) numOfEntries), + numEntriesUnableToReadForReplication1.get()); + assertEquals("numEntriesUnableToReadForReplication for RW2", Long.valueOf((long) numOfEntries), + numEntriesUnableToReadForReplication2.get()); + + /* + * Since these entries are unavailable, + * unableToReadEntriesForReplication should be of size numOfEntries. + */ + assertEquals("RW1 unabletoreadentries", numOfEntries, + rw1.unableToReadEntriesForReplication.get(lh.getId()).size()); + assertEquals("RW2 unabletoreadentries", numOfEntries, + rw2.unableToReadEntriesForReplication.get(lh.getId()).size()); + } finally { + rw1.shutdown(); + rw2.shutdown(); + underReplicationManager.close(); + } + } + + /** + * Tests that ReplicationWorker should not have identified for postponing + * the replication if ledger is in open state and lastFragment is not in + * underReplication state. Note that RW should not fence such ledgers. + */ + @Test + public void testRWShouldReplicateTheLedgersAfterTimeoutIfLastFragmentIsNotUR() + throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + // Reform ensemble...Making sure that last fragment is not in + // under-replication + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + baseClientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + @Cleanup MetadataClientDriver driver = MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory + .newLedgerUnderreplicationManager(); + + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + lh = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + + // Ledger should be still in open state + assertTrue("Ledger must have been closed by RW", ClientUtil + .isLedgerOpen(lh)); + } finally { + rw.shutdown(); + underReplicationManager.close(); + } + + } + + /** + * Test that the replication worker will not shutdown on a simple ZK disconnection. 
+ */ + @Test + public void testRWZKConnectionLost() throws Exception { + try (ZooKeeperClient zk = ZooKeeperClient.newBuilder() + .connectString(zkUtil.getZooKeeperConnectString()) + .sessionTimeoutMs(10000) + .build()) { + + ReplicationWorker rw = new ReplicationWorker(baseConf); + rw.start(); + for (int i = 0; i < 10; i++) { + if (rw.isRunning()) { + break; + } + Thread.sleep(1000); + } + assertTrue("Replication worker should be running", rw.isRunning()); + + stopZKCluster(); + // ZK is down for shorter period than reconnect timeout + Thread.sleep(1000); + startZKCluster(); + + assertTrue("Replication worker should not shutdown", rw.isRunning()); + } + } + + /** + * Test that the replication worker shuts down on non-recoverable ZK connection loss. + */ + @Test + public void testRWZKConnectionLostOnNonRecoverableZkError() throws Exception { + for (int j = 0; j < 3; j++) { + LedgerHandle lh = bkc.createLedger(1, 1, 1, + BookKeeper.DigestType.CRC32, TESTPASSWD, + null); + final long createdLedgerId = lh.getId(); + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + } + + killBookie(2); + killBookie(1); + startNewBookie(); + startNewBookie(); + + servers.get(0).getConfiguration().setRwRereplicateBackoffMs(100); + servers.get(0).startAutoRecovery(); + + Auditor auditor = getAuditor(10, TimeUnit.SECONDS); + ReplicationWorker rw = servers.get(0).getReplicationWorker(); + + ZkLedgerUnderreplicationManager ledgerUnderreplicationManager = + (ZkLedgerUnderreplicationManager) FieldUtils.readField(auditor, + "ledgerUnderreplicationManager", true); + + ZooKeeper zkc = (ZooKeeper) FieldUtils.readField(ledgerUnderreplicationManager, "zkc", true); + auditor.submitAuditTask().get(); + + assertTrue(zkc.getState().isConnected()); + zkc.close(); + assertFalse(zkc.getState().isConnected()); + + auditor.submitAuditTask(); + rw.run(); + + for (int i = 0; i < 10; i++) { + if (!rw.isRunning() && !auditor.isRunning()) { + break; + } + Thread.sleep(1000); + } + assertFalse("Replication worker should NOT be running", rw.isRunning()); + assertFalse("Auditor should NOT be running", auditor.isRunning()); + } + + private void killAllBookies(LedgerHandle lh, BookieId excludeBK) + throws Exception { + // Killing all bookies except newly replicated bookie + for (Entry> entry : + lh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List bookies = entry.getValue(); + for (BookieId bookie : bookies) { + if (bookie.equals(excludeBK)) { + continue; + } + killBookie(bookie); + } + } + } + + private void verifyRecoveredLedgers(LedgerHandle lh, long startEntryId, + long endEntryId) throws BKException, InterruptedException { + LedgerHandle lhs = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + Enumeration entries = lhs.readEntries(startEntryId, + endEntryId); + assertTrue("Should have the elements", entries.hasMoreElements()); + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + assertEquals("TestReplicationWorker", new String(entry.getEntry())); + } + } + + @Test + public void testReplicateEmptyOpenStateLedger() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, 2, BookKeeper.DigestType.CRC32, TESTPASSWD); + assertFalse(lh.getLedgerMetadata().isClosed()); + + List firstEnsemble = lh.getLedgerMetadata().getAllEnsembles().firstEntry().getValue(); + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue(); + killBookie(ensemble.get(1)); + + startNewBookie(); + 
baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30)); + ReplicationWorker replicationWorker = new ReplicationWorker(baseConf); + replicationWorker.start(); + + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), ensemble.get(1).toString()); + Awaitility.waitAtMost(60, TimeUnit.SECONDS).untilAsserted(() -> + assertFalse(ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) + ); + + LedgerHandle lh1 = bkc.openLedgerNoRecovery(lh.getId(), BookKeeper.DigestType.CRC32, TESTPASSWD); + assertTrue(lh1.getLedgerMetadata().isClosed()); + } finally { + replicationWorker.shutdown(); + } + } + + @Test + public void testRepairedNotAdheringPlacementPolicyLedgerFragmentsOnRack() throws Exception { + testRepairedNotAdheringPlacementPolicyLedgerFragments(RackawareEnsemblePlacementPolicy.class, null); + } + + @Test + public void testReplicationStats() throws Exception { + BiConsumer checkReplicationStats = (first, rw) -> { + try { + final Method rereplicate = rw.getClass().getDeclaredMethod("rereplicate"); + rereplicate.setAccessible(true); + final Object result = rereplicate.invoke(rw); + final Field statsLoggerField = rw.getClass().getDeclaredField("statsLogger"); + statsLoggerField.setAccessible(true); + final TestStatsLogger statsLogger = (TestStatsLogger) statsLoggerField.get(rw); + + final Counter numDeferLedgerLockReleaseOfFailedLedgerCounter = + statsLogger.getCounter(ReplicationStats.NUM_DEFER_LEDGER_LOCK_RELEASE_OF_FAILED_LEDGER); + final Counter numLedgersReplicatedCounter = + statsLogger.getCounter(ReplicationStats.NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED); + final Counter numNotAdheringPlacementLedgersCounter = statsLogger + .getCounter(ReplicationStats.NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED); + + assertEquals("NUM_DEFER_LEDGER_LOCK_RELEASE_OF_FAILED_LEDGER", + 1, numDeferLedgerLockReleaseOfFailedLedgerCounter.get().longValue()); + + if (first) { + assertFalse((boolean) result); + assertEquals("NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED", + 0, numLedgersReplicatedCounter.get().longValue()); + assertEquals("NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED", + 0, numNotAdheringPlacementLedgersCounter.get().longValue()); + + } else { + assertTrue((boolean) result); + assertEquals("NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED", + 1, numLedgersReplicatedCounter.get().longValue()); + assertEquals("NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED", + 1, numNotAdheringPlacementLedgersCounter.get().longValue()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + testRepairedNotAdheringPlacementPolicyLedgerFragments( + RackawareEnsemblePlacementPolicy.class, checkReplicationStats); + } + + private void testRepairedNotAdheringPlacementPolicyLedgerFragments( + Class placementPolicyClass, + BiConsumer checkReplicationStats) throws Exception { + List firstThreeBookies = servers.stream().map(ele -> { + try { + return ele.getServer().getBookieId(); + } catch (UnknownHostException e) { + return null; + } + }).filter(Objects::nonNull).collect(Collectors.toList()); + + baseClientConf.setProperty("reppDnsResolverClass", StaticDNSResolver.class.getName()); + baseClientConf.setProperty("enforceStrictZoneawarePlacement", false); + bkc.close(); + bkc = new BookKeeperTestClient(baseClientConf) { + @Override + protected EnsemblePlacementPolicy initializeEnsemblePlacementPolicy(ClientConfiguration conf, + DNSToSwitchMapping dnsResolver, + HashedWheelTimer timer, + FeatureProvider featureProvider, + StatsLogger statsLogger, + BookieAddressResolver 
bookieAddressResolver) + throws IOException { + EnsemblePlacementPolicy ensemblePlacementPolicy = null; + if (ZoneawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildZoneAwareEnsemblePlacementPolicy(firstThreeBookies); + } else if (RackawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildRackAwareEnsemblePlacementPolicy(firstThreeBookies); + } + ensemblePlacementPolicy.initialize(conf, Optional.ofNullable(dnsResolver), timer, + featureProvider, statsLogger, bookieAddressResolver); + return ensemblePlacementPolicy; + } + }; + + //This ledger not adhering placement policy, the combine(0,1,2) rack is 1. + LedgerHandle lh = bkc.createLedger(3, 3, 3, BookKeeper.DigestType.CRC32, TESTPASSWD); + + int entrySize = 10; + for (int i = 0; i < entrySize; i++) { + lh.addEntry(data); + } + lh.close(); + + int minNumRacksPerWriteQuorumConfValue = 2; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + servConf.setProperty("reppDnsResolverClass", StaticDNSResolver.class.getName()); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1000); + servConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + 1, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + } + + ZooKeeper zk = getZk((PulsarMetadataClientDriver) bkc.getMetadataClientDriver()); + + + Stat stat = zk.exists("/ledgers/underreplication/ledgers/0000/0000/0000/0000/urL0000000000", false); + assertNotNull(stat); + + baseConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + BookKeeper bookKeeper = new BookKeeperTestClient(baseClientConf) { + @Override + protected EnsemblePlacementPolicy initializeEnsemblePlacementPolicy(ClientConfiguration conf, + DNSToSwitchMapping dnsResolver, + HashedWheelTimer timer, + FeatureProvider featureProvider, + StatsLogger statsLogger, + BookieAddressResolver bookieAddressResolver) + throws IOException { + EnsemblePlacementPolicy ensemblePlacementPolicy = null; + if (ZoneawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildZoneAwareEnsemblePlacementPolicy(firstThreeBookies); + } else if (RackawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildRackAwareEnsemblePlacementPolicy(firstThreeBookies); + } + ensemblePlacementPolicy.initialize(conf, Optional.ofNullable(dnsResolver), timer, + featureProvider, statsLogger, bookieAddressResolver); + return ensemblePlacementPolicy; + } + }; + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(REPLICATION_SCOPE); + ReplicationWorker rw = new ReplicationWorker(baseConf, 
bookKeeper, false, statsLogger); + + if (checkReplicationStats != null) { + checkReplicationStats.accept(true, rw); + } else { + rw.start(); + } + + //start new bookie, the rack is /rack2 + BookieId newBookieId = startNewBookieAndReturnBookieId(); + + if (checkReplicationStats != null) { + checkReplicationStats.accept(false, rw); + } + + Awaitility.await().untilAsserted(() -> { + LedgerMetadata metadata = bkc.getLedgerManager().readLedgerMetadata(lh.getId()).get().getValue(); + List newBookies = metadata.getAllEnsembles().get(0L); + assertTrue(newBookies.contains(newBookieId)); + }); + + Awaitility.await().untilAsserted(() -> { + Stat stat1 = zk.exists("/ledgers/underreplication/ledgers/0000/0000/0000/0000/urL0000000000", false); + assertNull(stat1); + }); + + for (BookieId rack1Book : firstThreeBookies) { + killBookie(rack1Book); + } + + verifyRecoveredLedgers(lh, 0, entrySize - 1); + + if (checkReplicationStats == null) { + rw.shutdown(); + } + baseConf.setRepairedPlacementPolicyNotAdheringBookieEnable(false); + bookKeeper.close(); + } + + private EnsemblePlacementPolicy buildRackAwareEnsemblePlacementPolicy(List bookieIds) { + return new RackawareEnsemblePlacementPolicy() { + @Override + public String resolveNetworkLocation(BookieId addr) { + if (bookieIds.contains(addr)) { + return "/rack1"; + } + //The other bookie is /rack2 + return "/rack2"; + } + }; + } + + private EnsemblePlacementPolicy buildZoneAwareEnsemblePlacementPolicy(List firstThreeBookies) { + return new ZoneawareEnsemblePlacementPolicy() { + @Override + protected String resolveNetworkLocation(BookieId addr) { + //The first three bookie 1 is /zone1/ud1 + //The first three bookie 2,3 is /zone1/ud2 + if (firstThreeBookies.get(0).equals(addr)) { + return "/zone1/ud1"; + } else if (firstThreeBookies.contains(addr)) { + return "/zone1/ud2"; + } + //The other bookie is /zone2/ud1 + return "/zone2/ud1"; + } + }; + } + + private TestStatsLogger startAuditorAndWaitForPlacementPolicyCheck(ServerConfiguration servConf, + MutableObject auditorRef) + throws MetadataException, CompatibilityException, KeeperException, + InterruptedException, ReplicationException.UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestStatsProvider.TestOpStatsLogger placementPolicyCheckStatsLogger = + (TestStatsProvider.TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + final AuditorPeriodicCheckTest.TestAuditor auditor = new AuditorPeriodicCheckTest.TestAuditor( + BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, + placementPolicyCheckStatsLogger.getSuccessCount()); + urm.setPlacementPolicyCheckCTime(-1); + auditor.start(); + /* + * since placementPolicyCheckCTime is set to -1, placementPolicyCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("placementPolicyCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, + 
placementPolicyCheckStatsLogger.getSuccessCount()); + return statsLogger; + } + + private ZooKeeper getZk(PulsarMetadataClientDriver pulsarMetadataClientDriver) throws Exception { + PulsarLedgerManagerFactory pulsarLedgerManagerFactory = + (PulsarLedgerManagerFactory) pulsarMetadataClientDriver.getLedgerManagerFactory(); + Field field = pulsarLedgerManagerFactory.getClass().getDeclaredField("store"); + field.setAccessible(true); + ZKMetadataStore zkMetadataStore = (ZKMetadataStore) field.get(pulsarLedgerManagerFactory); + return zkMetadataStore.getZkClient(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java new file mode 100644 index 0000000000000..5113edb72c49a --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * This file is derived from ZooKeeperUtil from Apache BookKeeper + * http://bookkeeper.apache.org + */ + +package org.apache.bookkeeper.replication; + +import static org.testng.Assert.assertTrue; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.test.ZooKeeperCluster; +import org.apache.bookkeeper.util.IOUtils; +import org.apache.bookkeeper.util.ZkUtils; +import org.apache.bookkeeper.zookeeper.ZooKeeperClient; +import org.apache.commons.io.FileUtils; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.server.NIOServerCnxnFactory; +import org.apache.zookeeper.server.ZooKeeperServer; +import org.apache.zookeeper.test.ClientBase; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test the zookeeper utilities. 
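getZk() above, like the FieldUtils.readField calls earlier in this class and the readField helper in TestZKServer further down, reaches a private field through reflection. A minimal self-contained sketch of that pattern with hypothetical class and field names (nothing below is Pulsar or BookKeeper API):

    import java.lang.reflect.Field;

    class ReflectiveReadSketch {
        static final class Holder {
            private final String secret = "bk";   // the private state we want to inspect
        }

        // Read a private field by name; this mirrors what getZk()/readField do.
        static Object readPrivateField(Object target, String fieldName) throws Exception {
            Field f = target.getClass().getDeclaredField(fieldName);
            f.setAccessible(true);                // bypass the Java access check
            return f.get(target);
        }

        public static void main(String[] args) throws Exception {
            System.out.println(readPrivateField(new Holder(), "secret")); // prints "bk"
        }
    }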
+ */ +public class ZooKeeperUtil implements ZooKeeperCluster { + + static { + // org.apache.zookeeper.test.ClientBase uses FourLetterWordMain, from 3.5.3 four letter words + // are disabled by default due to security reasons + System.setProperty("zookeeper.4lw.commands.whitelist", "*"); + } + static final Logger LOG = LoggerFactory.getLogger(ZooKeeperUtil.class); + + // ZooKeeper related variables + protected Integer zooKeeperPort = 0; + private InetSocketAddress zkaddr; + + protected ZooKeeperServer zks; + protected ZooKeeper zkc; // zookeeper client + protected NIOServerCnxnFactory serverFactory; + protected File zkTmpDir; + private String connectString; + private String ledgersRootPath; + + public ZooKeeperUtil(String ledgersRootPath) { + this.ledgersRootPath = ledgersRootPath; + String loopbackIPAddr = InetAddress.getLoopbackAddress().getHostAddress(); + zkaddr = new InetSocketAddress(loopbackIPAddr, 0); + connectString = loopbackIPAddr + ":" + zooKeeperPort; + } + + @Override + public ZooKeeper getZooKeeperClient() { + return zkc; + } + + @Override + public String getZooKeeperConnectString() { + return connectString; + } + + @Override + public String getMetadataServiceUri() { + return getMetadataServiceUri("/ledgers"); + } + + @Override + public String getMetadataServiceUri(String zkLedgersRootPath) { + return "zk://" + connectString + zkLedgersRootPath; + } + + @Override + public String getMetadataServiceUri(String zkLedgersRootPath, String type) { + return "zk+" + type + "://" + connectString + zkLedgersRootPath; + } + + @Override + public void startCluster() throws Exception { + // create a ZooKeeper server(dataDir, dataLogDir, port) + LOG.debug("Running ZK server"); + ClientBase.setupTestEnv(); + zkTmpDir = IOUtils.createTempDir("zookeeper", "test"); + + // start the server and client. 
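The getMetadataServiceUri() helpers above produce the classic ZooKeeper-style URI, which the replication tests earlier in this patch rewrite to Pulsar's metadata-store scheme with two replaceAll calls. A hedged round-trip sketch (the host and port are arbitrary example values):

    // Example only: for connectString "127.0.0.1:2181"
    String serviceUri = "zk://" + "127.0.0.1:2181" + "/ledgers";
    // -> "zk://127.0.0.1:2181/ledgers"

    // The rewrite used in TestReplicationWorker#setUp and TestAutoRecoveryAlongWithBookieServers:
    String pulsarUri = serviceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "");
    // -> "metadata-store:127.0.0.1:2181"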
+ restartCluster(); + + // create default bk ensemble + createBKEnsemble(ledgersRootPath); + } + + @Override + public void createBKEnsemble(String ledgersPath) throws KeeperException, InterruptedException { + int last = ledgersPath.lastIndexOf('/'); + if (last > 0) { + String pathToCreate = ledgersPath.substring(0, last); + CompletableFuture future = new CompletableFuture<>(); + if (zkc.exists(pathToCreate, false) == null) { + ZkUtils.asyncCreateFullPathOptimistic(zkc, + pathToCreate, + new byte[0], + ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT, (i, s, o, s1) -> { + future.complete(null); + }, null); + } + future.join(); + } + + ZooKeeperCluster.super.createBKEnsemble(ledgersPath); + } + @Override + public void restartCluster() throws Exception { + zks = new ZooKeeperServer(zkTmpDir, zkTmpDir, + ZooKeeperServer.DEFAULT_TICK_TIME); + serverFactory = new NIOServerCnxnFactory(); + serverFactory.configure(zkaddr, 100); + serverFactory.startup(zks); + + if (0 == zooKeeperPort) { + zooKeeperPort = serverFactory.getLocalPort(); + zkaddr = new InetSocketAddress(zkaddr.getAddress().getHostAddress(), zooKeeperPort); + connectString = zkaddr.getAddress().getHostAddress() + ":" + zooKeeperPort; + } + + boolean b = ClientBase.waitForServerUp(getZooKeeperConnectString(), + ClientBase.CONNECTION_TIMEOUT); + LOG.debug("Server up: " + b); + + // create a zookeeper client + LOG.debug("Instantiate ZK Client"); + zkc = ZooKeeperClient.newBuilder() + .connectString(getZooKeeperConnectString()) + .sessionTimeoutMs(10000) + .build(); + } + + @Override + public void sleepCluster(final int time, + final TimeUnit timeUnit, + final CountDownLatch l) + throws InterruptedException, IOException { + Thread[] allthreads = new Thread[Thread.activeCount()]; + Thread.enumerate(allthreads); + for (final Thread t : allthreads) { + if (t.getName().contains("SyncThread:0")) { + Thread sleeper = new Thread() { + @SuppressWarnings("deprecation") + public void run() { + try { + t.suspend(); + l.countDown(); + timeUnit.sleep(time); + t.resume(); + } catch (Exception e) { + LOG.error("Error suspending thread", e); + } + } + }; + sleeper.start(); + return; + } + } + throw new IOException("ZooKeeper thread not found"); + } + + @Override + public void stopCluster() throws Exception { + if (zkc != null) { + zkc.close(); + } + + // shutdown ZK server + if (serverFactory != null) { + serverFactory.shutdown(); + assertTrue(ClientBase.waitForServerDown(getZooKeeperConnectString(), ClientBase.CONNECTION_TIMEOUT), + "waiting for server down"); + } + if (zks != null) { + zks.getTxnLogFactory().close(); + } + } + + @Override + public void killCluster() throws Exception { + stopCluster(); + FileUtils.deleteDirectory(zkTmpDir); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BaseMetadataStoreTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BaseMetadataStoreTest.java index 57a4e388572fa..ec6e6e03eae71 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BaseMetadataStoreTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BaseMetadataStoreTest.java @@ -21,7 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import io.etcd.jetcd.launcher.EtcdCluster; -import io.etcd.jetcd.launcher.EtcdClusterFactory; +import io.etcd.jetcd.test.EtcdClusterExtension; import java.io.File; import java.net.URI; import java.util.UUID; @@ -84,10 +84,11 @@ public Object[][] implementations() { private synchronized String 
getEtcdClusterConnectString() { if (etcdCluster == null) { - etcdCluster = EtcdClusterFactory.buildCluster("test", 1, false); + etcdCluster = EtcdClusterExtension.builder().withClusterName("test").withNodes(1).withSsl(false).build() + .cluster(); etcdCluster.start(); } - return etcdCluster.getClientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); + return etcdCluster.clientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); } public static Supplier stringSupplier(Supplier supplier) { diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataStoreTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataStoreTest.java index 949b4a9b2bacb..c87e9bda436bf 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataStoreTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataStoreTest.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Optional; import java.util.Set; +import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; @@ -50,8 +51,10 @@ import org.apache.pulsar.metadata.api.Notification; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.Stat; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; import org.assertj.core.util.Lists; import org.awaitility.Awaitility; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @Slf4j @@ -425,6 +428,73 @@ public void testDeleteUnusedDirectories(String provider, Supplier urlSup assertFalse(store.exists(prefix).join()); } + @DataProvider(name = "conditionOfSwitchThread") + public Object[][] conditionOfSwitchThread(){ + return new Object[][]{ + {false, false}, + {false, true}, + {true, false}, + {true, true} + }; + } + + @Test(dataProvider = "conditionOfSwitchThread") + public void testThreadSwitchOfZkMetadataStore(boolean hasSynchronizer, boolean enabledBatch) throws Exception { + final String prefix = newKey(); + final String metadataStoreName = UUID.randomUUID().toString().replaceAll("-", ""); + MetadataStoreConfig.MetadataStoreConfigBuilder builder = + MetadataStoreConfig.builder().metadataStoreName(metadataStoreName); + builder.fsyncEnable(false); + builder.batchingEnabled(enabledBatch); + if (!hasSynchronizer) { + builder.synchronizer(null); + } + MetadataStoreConfig config = builder.build(); + @Cleanup + ZKMetadataStore store = (ZKMetadataStore) MetadataStoreFactory.create(zks.getConnectionString(), config); + + final Runnable verify = () -> { + String currentThreadName = Thread.currentThread().getName(); + String errorMessage = String.format("Expect to switch to thread %s, but currently it is thread %s", + metadataStoreName, currentThreadName); + assertTrue(Thread.currentThread().getName().startsWith(metadataStoreName), errorMessage); + }; + + // put with node which has parent(but the parent node is not exists). + store.put(prefix + "/a1/b1/c1", "value".getBytes(), Optional.of(-1L)).thenApply((ignore) -> { + verify.run(); + return null; + }).join(); + // put. + store.put(prefix + "/b1", "value".getBytes(), Optional.of(-1L)).thenApply((ignore) -> { + verify.run(); + return null; + }).join(); + // get. + store.get(prefix + "/b1").thenApply((ignore) -> { + verify.run(); + return null; + }).join(); + // get the node which is not exists. 
+ store.get(prefix + "/non").thenApply((ignore) -> { + verify.run(); + return null; + }).join(); + // delete. + store.delete(prefix + "/b1", Optional.empty()).thenApply((ignore) -> { + verify.run(); + return null; + }).join(); + // delete the node which is not exists. + store.delete(prefix + "/non", Optional.empty()).thenApply((ignore) -> { + verify.run(); + return null; + }).exceptionally(ex -> { + verify.run(); + return null; + }).join(); + } + @Test(dataProvider = "impl") public void testPersistent(String provider, Supplier urlSupplier) throws Exception { String metadataUrl = urlSupplier.get(); diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java index 726f5ae312d19..33034ddb3fe0f 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java @@ -19,39 +19,33 @@ package org.apache.pulsar.metadata; import static org.testng.Assert.assertTrue; - import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; -import java.net.InetSocketAddress; +import java.lang.reflect.Field; import java.net.Socket; - -import java.nio.charset.StandardCharsets; - +import java.util.Properties; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; - import org.apache.commons.io.FileUtils; -import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.server.ContainerManager; -import org.apache.zookeeper.server.NIOServerCnxnFactory; -import org.apache.zookeeper.server.Request; -import org.apache.zookeeper.server.RequestProcessor; import org.apache.zookeeper.server.ServerCnxnFactory; -import org.apache.zookeeper.server.SessionTracker; import org.apache.zookeeper.server.ZooKeeperServer; +import org.apache.zookeeper.server.ZooKeeperServerMain; +import org.apache.zookeeper.server.embedded.ExitHandler; +import org.apache.zookeeper.server.embedded.ZooKeeperServerEmbedded; import org.assertj.core.util.Files; @Slf4j public class TestZKServer implements AutoCloseable { + public static final int TICK_TIME = 1000; - protected ZooKeeperServer zks; - private final File zkDataDir; - private ServerCnxnFactory serverFactory; - private ContainerManager containerManager; - private int zkPort = 0; + private final File zkDataDir; + private int zkPort; // initially this is zero + private ZooKeeperServerEmbedded zooKeeperServerEmbedded; public TestZKServer() throws Exception { this.zkDataDir = Files.newTemporaryFolder(); @@ -64,86 +58,86 @@ public TestZKServer() throws Exception { } public void start() throws Exception { - this.zks = new ZooKeeperServer(zkDataDir, zkDataDir, TICK_TIME); - this.zks.setMaxSessionTimeout(300_000); - this.serverFactory = new NIOServerCnxnFactory(); - this.serverFactory.configure(new InetSocketAddress(zkPort), 1000); - this.serverFactory.startup(zks, true); - - this.zkPort = serverFactory.getLocalPort(); - log.info("Started test ZK server on port {}", zkPort); + final Properties configZookeeper = new Properties(); + configZookeeper.put("clientPort", zkPort + ""); + configZookeeper.put("host", "127.0.0.1"); + configZookeeper.put("ticktime", TICK_TIME + ""); + zooKeeperServerEmbedded = ZooKeeperServerEmbedded + .builder() + .baseDir(zkDataDir.toPath()) + .configuration(configZookeeper) + .exitHandler(ExitHandler.LOG_ONLY) + .build(); + + zooKeeperServerEmbedded.start(60_000); + 
log.info("Started test ZK server on at {}", zooKeeperServerEmbedded.getConnectionString()); + + ZooKeeperServerMain zooKeeperServerMain = getZooKeeperServerMain(zooKeeperServerEmbedded); + ServerCnxnFactory serverCnxnFactory = getServerCnxnFactory(zooKeeperServerMain); + // save the port, in order to allow restarting on the same port + zkPort = serverCnxnFactory.getLocalPort(); boolean zkServerReady = waitForServerUp(this.getConnectionString(), 30_000); assertTrue(zkServerReady); + } - this.containerManager = new ContainerManager(zks.getZKDatabase(), new RequestProcessor() { - @Override - public void processRequest(Request request) throws RequestProcessorException { - String path = StandardCharsets.UTF_8.decode(request.request).toString(); - try { - zks.getZKDatabase().getDataTree().deleteNode(path, -1); - } catch (KeeperException.NoNodeException e) { - // Ok - } - } + @SneakyThrows + private static ZooKeeperServerMain getZooKeeperServerMain(ZooKeeperServerEmbedded zooKeeperServerEmbedded) { + ZooKeeperServerMain zooKeeperServerMain = readField(zooKeeperServerEmbedded.getClass(), + "mainsingle", zooKeeperServerEmbedded); + return zooKeeperServerMain; + } - @Override - public void shutdown() { + @SneakyThrows + private static ContainerManager getContainerManager(ZooKeeperServerMain zooKeeperServerMain) { + ContainerManager containerManager = readField(ZooKeeperServerMain.class, "containerManager", zooKeeperServerMain); + return containerManager; + } - } - }, 10, 10000, 0L); + @SneakyThrows + private static ZooKeeperServer getZooKeeperServer(ZooKeeperServerMain zooKeeperServerMain) { + ServerCnxnFactory serverCnxnFactory = getServerCnxnFactory(zooKeeperServerMain); + ZooKeeperServer zkServer = readField(ServerCnxnFactory.class, "zkServer", serverCnxnFactory); + return zkServer; + } + + @SneakyThrows + private static T readField(Class clazz, String field, Object object) { + Field declaredField = clazz.getDeclaredField(field); + boolean accessible = declaredField.isAccessible(); + if (!accessible) { + declaredField.setAccessible(true); + } + try { + return (T) declaredField.get(object); + } finally { + declaredField.setAccessible(accessible); + } + } + + private static ServerCnxnFactory getServerCnxnFactory(ZooKeeperServerMain zooKeeperServerMain) throws Exception { + ServerCnxnFactory serverCnxnFactory = readField(ZooKeeperServerMain.class, "cnxnFactory", zooKeeperServerMain); + return serverCnxnFactory; } public void checkContainers() throws Exception { // Make sure the container nodes are actually deleted Thread.sleep(1000); + ContainerManager containerManager = getContainerManager(getZooKeeperServerMain(zooKeeperServerEmbedded)); containerManager.checkContainers(); } public void stop() throws Exception { - if (containerManager != null) { - containerManager.stop(); - containerManager = null; - } - - if (serverFactory != null) { - serverFactory.shutdown(); - serverFactory = null; - } - - if (zks != null) { - SessionTracker sessionTracker = zks.getSessionTracker(); - zks.shutdown(); - zks.getZKDatabase().close(); - if (sessionTracker instanceof Thread) { - Thread sessionTrackerThread = (Thread) sessionTracker; - sessionTrackerThread.interrupt(); - sessionTrackerThread.join(); - } - zks = null; + if (zooKeeperServerEmbedded != null) { + zooKeeperServerEmbedded.close(); } - log.info("Stopped test ZK server"); } public void expireSession(long sessionId) { - zks.expire(new SessionTracker.Session() { - @Override - public long getSessionId() { - return sessionId; - } - - @Override - public int 
getTimeout() { - return 10_000; - } - - @Override - public boolean isClosing() { - return false; - } - }); + getZooKeeperServer(getZooKeeperServerMain(zooKeeperServerEmbedded)) + .expire(sessionId); } @Override @@ -152,12 +146,9 @@ public void close() throws Exception { FileUtils.deleteDirectory(zkDataDir); } - public int getPort() { - return zkPort; - } - + @SneakyThrows public String getConnectionString() { - return "127.0.0.1:" + getPort(); + return zooKeeperServerEmbedded.getConnectionString(); } public static boolean waitForServerUp(String hp, long timeout) { diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/FaultInjectableZKRegistrationManager.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/FaultInjectableZKRegistrationManager.java new file mode 100644 index 0000000000000..bcbf41addbae3 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/FaultInjectableZKRegistrationManager.java @@ -0,0 +1,630 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
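Note: TestZKServer now reaches into ZooKeeperServerEmbedded through reflection because the embedded wrapper does not expose its ZooKeeperServerMain, ServerCnxnFactory, or ContainerManager; the field names it reads ("mainsingle", "cnxnFactory", "zkServer", "containerManager") are ZooKeeper internals and may change between releases, which is why the helpers stay confined to test code. A generic sketch of the same readField technique against a stand-in class:

    import java.lang.reflect.Field;

    public class ReadFieldSketch {
        static class Holder {
            private final int port = 2181; // private field we want to inspect
        }

        @SuppressWarnings("unchecked")
        static <T> T readField(Class<?> clazz, String fieldName, Object target) throws Exception {
            Field field = clazz.getDeclaredField(fieldName);
            boolean accessible = field.isAccessible();
            field.setAccessible(true);
            try {
                return (T) field.get(target);
            } finally {
                field.setAccessible(accessible); // restore the original flag, as the test helper does
            }
        }

        public static void main(String[] args) throws Exception {
            int port = ReadFieldSketch.<Integer>readField(Holder.class, "port", new Holder());
            System.out.println(port); // prints 2181
        }
    }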
+ */ + +package org.apache.pulsar.metadata.bookkeeper; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; +import static org.apache.bookkeeper.util.BookKeeperConstants.COOKIE_NODE; +import static org.apache.bookkeeper.util.BookKeeperConstants.EMPTY_BYTE_ARRAY; +import static org.apache.bookkeeper.util.BookKeeperConstants.INSTANCEID; +import static org.apache.bookkeeper.util.BookKeeperConstants.READONLY; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.bookie.BookieException.BookieIllegalOpException; +import org.apache.bookkeeper.bookie.BookieException.CookieExistException; +import org.apache.bookkeeper.bookie.BookieException.CookieNotFoundException; +import org.apache.bookkeeper.bookie.BookieException.MetadataStoreException; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BKException.BKInterruptedException; +import org.apache.bookkeeper.client.BKException.MetaStoreException; +import org.apache.bookkeeper.common.concurrent.FutureUtils; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.BookieServiceInfo; +import org.apache.bookkeeper.discover.RegistrationClient; +import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.discover.ZKRegistrationClient; +import org.apache.bookkeeper.meta.AbstractZkLedgerManagerFactory; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.ZkLayoutManager; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.DataFormats.BookieServiceInfoFormat; +import org.apache.bookkeeper.util.BookKeeperConstants; +import org.apache.bookkeeper.util.ZkUtils; +import org.apache.bookkeeper.versioning.LongVersion; +import org.apache.bookkeeper.versioning.Version; +import org.apache.bookkeeper.versioning.Versioned; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; +import org.apache.zookeeper.KeeperException.NodeExistsException; +import org.apache.zookeeper.Op; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.Watcher.Event.EventType; +import org.apache.zookeeper.Watcher.Event.KeeperState; +import org.apache.zookeeper.ZKUtil; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; + +/** + * Fault injectable ZK registration manager. + * Copy from #{@link org.apache.bookkeeper.discover.ZKRegistrationManager}. 
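Note: the point of copying ZKRegistrationManager into this test-only class is the single hook it adds: a function a test can install to run arbitrary code between publishing the read-only registration and deleting the writable registration node, which is exactly the window where registration clients can observe inconsistent state. A condensed sketch of that hook shape (the type parameters are elided in this hunk; Function<Void, Void> is assumed, and the ZooKeeper calls are stubbed out):

    import java.util.function.Function;

    public class InjectableRegistrationSketch {
        // Test-installable hook; null means "no fault injection".
        private volatile Function<Void, Void> hookOnRegisterReadOnly;

        /** Mirrors betweenRegisterReadOnlyBookie(...) in FaultInjectableZKRegistrationManager. */
        public void betweenRegisterReadOnlyBookie(Function<Void, Void> fn) {
            hookOnRegisterReadOnly = fn;
        }

        void registerReadOnly(String bookieId) {
            createReadOnlyNode(bookieId);           // step 1: publish the read-only registration
            if (hookOnRegisterReadOnly != null) {
                hookOnRegisterReadOnly.apply(null); // fault injection point, e.g. an artificial delay
            }
            deleteWritableNode(bookieId);           // step 2: remove the writable registration
        }

        private void createReadOnlyNode(String bookieId) { /* ZooKeeper create elided */ }

        private void deleteWritableNode(String bookieId) { /* ZooKeeper delete elided */ }
    }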
+ */ +@Slf4j +public class FaultInjectableZKRegistrationManager implements RegistrationManager { + + private static final Function EXCEPTION_FUNC = cause -> { + if (cause instanceof BKException) { + log.error("Failed to get bookie list : ", cause); + return (BKException) cause; + } else if (cause instanceof InterruptedException) { + log.error("Interrupted reading bookie list : ", cause); + return new BKInterruptedException(); + } else { + return new MetaStoreException(); + } + }; + + private final ServerConfiguration conf; + private final ZooKeeper zk; + private final List zkAcls; + private final LayoutManager layoutManager; + + private volatile boolean zkRegManagerInitialized = false; + + // ledgers root path + private final String ledgersRootPath; + // cookie path + private final String cookiePath; + // registration paths + protected final String bookieRegistrationPath; + protected final String bookieReadonlyRegistrationPath; + // session timeout in milliseconds + private final int zkTimeoutMs; + private final List listeners = new ArrayList<>(); + private Function hookOnRegisterReadOnly; + + public FaultInjectableZKRegistrationManager(ServerConfiguration conf, + ZooKeeper zk) { + this(conf, zk, ZKMetadataDriverBase.resolveZkLedgersRootPath(conf)); + } + + public FaultInjectableZKRegistrationManager(ServerConfiguration conf, + ZooKeeper zk, + String ledgersRootPath) { + this.conf = conf; + this.zk = zk; + this.zkAcls = ZkUtils.getACLs(conf); + this.ledgersRootPath = ledgersRootPath; + this.cookiePath = ledgersRootPath + "/" + COOKIE_NODE; + this.bookieRegistrationPath = ledgersRootPath + "/" + AVAILABLE_NODE; + this.bookieReadonlyRegistrationPath = this.bookieRegistrationPath + "/" + READONLY; + this.zkTimeoutMs = conf.getZkTimeout(); + + this.layoutManager = new ZkLayoutManager( + zk, + ledgersRootPath, + zkAcls); + + this.zk.register(event -> { + if (!zkRegManagerInitialized) { + // do nothing until first registration + return; + } + // Check for expired connection. + if (event.getType().equals(EventType.None) + && event.getState().equals(KeeperState.Expired)) { + listeners.forEach(RegistrationListener::onRegistrationExpired); + } + }); + } + + @Override + public void close() { + // no-op + } + + /** + * Returns the CookiePath of the bookie in the ZooKeeper. + * + * @param bookieId bookie id + * @return + */ + public String getCookiePath(BookieId bookieId) { + return this.cookiePath + "/" + bookieId; + } + + // + // Registration Management + // + + /** + * Check existence of regPath and wait it expired if possible. + * + * @param regPath reg node path. + * @return true if regPath exists, otherwise return false + * @throws IOException if can't create reg path + */ + protected boolean checkRegNodeAndWaitExpired(String regPath) throws IOException { + final CountDownLatch prevNodeLatch = new CountDownLatch(1); + Watcher zkPrevRegNodewatcher = new Watcher() { + @Override + public void process(WatchedEvent event) { + // Check for prev znode deletion. Connection expiration is + // not handling, since bookie has logic to shutdown. + if (EventType.NodeDeleted == event.getType()) { + prevNodeLatch.countDown(); + } + } + }; + try { + Stat stat = zk.exists(regPath, zkPrevRegNodewatcher); + if (null != stat) { + // if the ephemeral owner isn't current zookeeper client + // wait for it to be expired. 
+ if (stat.getEphemeralOwner() != zk.getSessionId()) { + log.info("Previous bookie registration znode: {} exists, so waiting zk sessiontimeout:" + + " {} ms for znode deletion", regPath, zkTimeoutMs); + // waiting for the previous bookie reg znode deletion + if (!prevNodeLatch.await(zkTimeoutMs, TimeUnit.MILLISECONDS)) { + throw new NodeExistsException(regPath); + } else { + return false; + } + } + return true; + } else { + return false; + } + } catch (KeeperException ke) { + log.error("ZK exception checking and wait ephemeral znode {} expired : ", regPath, ke); + throw new IOException("ZK exception checking and wait ephemeral znode " + + regPath + " expired", ke); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + log.error("Interrupted checking and wait ephemeral znode {} expired : ", regPath, ie); + throw new IOException("Interrupted checking and wait ephemeral znode " + + regPath + " expired", ie); + } + } + + @Override + public void registerBookie(BookieId bookieId, boolean readOnly, + BookieServiceInfo bookieServiceInfo) throws BookieException { + if (!readOnly) { + String regPath = bookieRegistrationPath + "/" + bookieId; + doRegisterBookie(regPath, bookieServiceInfo); + } else { + doRegisterReadOnlyBookie(bookieId, bookieServiceInfo); + } + } + + @VisibleForTesting + static byte[] serializeBookieServiceInfo(BookieServiceInfo bookieServiceInfo) { + if (log.isDebugEnabled()) { + log.debug("serialize BookieServiceInfo {}", bookieServiceInfo); + } + try (ByteArrayOutputStream os = new ByteArrayOutputStream()) { + BookieServiceInfoFormat.Builder builder = BookieServiceInfoFormat.newBuilder(); + List bsiEndpoints = bookieServiceInfo.getEndpoints().stream() + .map(e -> { + return BookieServiceInfoFormat.Endpoint.newBuilder() + .setId(e.getId()) + .setPort(e.getPort()) + .setHost(e.getHost()) + .setProtocol(e.getProtocol()) + .addAllAuth(e.getAuth()) + .addAllExtensions(e.getExtensions()) + .build(); + }) + .collect(Collectors.toList()); + + builder.addAllEndpoints(bsiEndpoints); + builder.putAllProperties(bookieServiceInfo.getProperties()); + + builder.build().writeTo(os); + return os.toByteArray(); + } catch (IOException err) { + log.error("Cannot serialize bookieServiceInfo from " + bookieServiceInfo); + throw new RuntimeException(err); + } + } + + private void doRegisterBookie(String regPath, BookieServiceInfo bookieServiceInfo) throws BookieException { + // ZK ephemeral node for this Bookie. + try { + if (!checkRegNodeAndWaitExpired(regPath)) { + // Create the ZK ephemeral node for this Bookie. + zk.create(regPath, serializeBookieServiceInfo(bookieServiceInfo), zkAcls, CreateMode.EPHEMERAL); + zkRegManagerInitialized = true; + } + } catch (KeeperException ke) { + log.error("ZK exception registering ephemeral Znode for Bookie!", ke); + // Throw an IOException back up. This will cause the Bookie + // constructor to error out. Alternatively, we could do a System + // exit here as this is a fatal error. + throw new MetadataStoreException(ke); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + log.error("Interrupted exception registering ephemeral Znode for Bookie!", ie); + // Throw an IOException back up. This will cause the Bookie + // constructor to error out. Alternatively, we could do a System + // exit here as this is a fatal error. 
+ throw new MetadataStoreException(ie); + } catch (IOException e) { + throw new MetadataStoreException(e); + } + } + + private void doRegisterReadOnlyBookie(BookieId bookieId, BookieServiceInfo bookieServiceInfo) + throws BookieException { + try { + if (null == zk.exists(this.bookieReadonlyRegistrationPath, false)) { + try { + zk.create(this.bookieReadonlyRegistrationPath, serializeBookieServiceInfo(bookieServiceInfo), + zkAcls, CreateMode.PERSISTENT); + } catch (NodeExistsException e) { + // this node is just now created by someone. + } + } + String regPath = bookieReadonlyRegistrationPath + "/" + bookieId; + doRegisterBookie(regPath, bookieServiceInfo); + // clear the write state + regPath = bookieRegistrationPath + "/" + bookieId; + try { + if (hookOnRegisterReadOnly != null) { + hookOnRegisterReadOnly.apply(null); + } + // Clear the current registered node + zk.delete(regPath, -1); + } catch (KeeperException.NoNodeException nne) { + log.warn("No writable bookie registered node {} when transitioning to readonly", + regPath, nne); + } + } catch (KeeperException | InterruptedException e) { + throw new MetadataStoreException(e); + } + } + + @Override + public void unregisterBookie(BookieId bookieId, boolean readOnly) throws BookieException { + String regPath; + if (!readOnly) { + regPath = bookieRegistrationPath + "/" + bookieId; + } else { + regPath = bookieReadonlyRegistrationPath + "/" + bookieId; + } + doUnregisterBookie(regPath); + } + + private void doUnregisterBookie(String regPath) throws BookieException { + try { + zk.delete(regPath, -1); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new MetadataStoreException(ie); + } catch (KeeperException e) { + throw new MetadataStoreException(e); + } + } + + // + // Cookie Management + // + + @Override + public void writeCookie(BookieId bookieId, + Versioned cookieData) throws BookieException { + String zkPath = getCookiePath(bookieId); + try { + if (Version.NEW == cookieData.getVersion()) { + if (zk.exists(cookiePath, false) == null) { + try { + zk.create(cookiePath, new byte[0], zkAcls, CreateMode.PERSISTENT); + } catch (NodeExistsException nne) { + log.info("More than one bookie tried to create {} at once. 
Safe to ignore.", + cookiePath); + } + } + zk.create(zkPath, cookieData.getValue(), zkAcls, CreateMode.PERSISTENT); + } else { + if (!(cookieData.getVersion() instanceof LongVersion)) { + throw new BookieIllegalOpException("Invalid version type, expected it to be LongVersion"); + } + zk.setData( + zkPath, + cookieData.getValue(), + (int) ((LongVersion) cookieData.getVersion()).getLongVersion()); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new MetadataStoreException("Interrupted writing cookie for bookie " + bookieId, ie); + } catch (NoNodeException nne) { + throw new CookieNotFoundException(bookieId.toString()); + } catch (NodeExistsException nee) { + throw new CookieExistException(bookieId.toString()); + } catch (KeeperException e) { + throw new MetadataStoreException("Failed to write cookie for bookie " + bookieId); + } + } + + @Override + public Versioned readCookie(BookieId bookieId) throws BookieException { + String zkPath = getCookiePath(bookieId); + try { + Stat stat = zk.exists(zkPath, false); + byte[] data = zk.getData(zkPath, false, stat); + // sets stat version from ZooKeeper + LongVersion version = new LongVersion(stat.getVersion()); + return new Versioned<>(data, version); + } catch (NoNodeException nne) { + throw new CookieNotFoundException(bookieId.toString()); + } catch (KeeperException | InterruptedException e) { + throw new MetadataStoreException("Failed to read cookie for bookie " + bookieId); + } + } + + @Override + public void removeCookie(BookieId bookieId, Version version) throws BookieException { + String zkPath = getCookiePath(bookieId); + try { + zk.delete(zkPath, (int) ((LongVersion) version).getLongVersion()); + } catch (NoNodeException e) { + throw new CookieNotFoundException(bookieId.toString()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new MetadataStoreException("Interrupted deleting cookie for bookie " + bookieId, e); + } catch (KeeperException e) { + throw new MetadataStoreException("Failed to delete cookie for bookie " + bookieId); + } + + log.info("Removed cookie from {} for bookie {}.", cookiePath, bookieId); + } + + + @Override + public String getClusterInstanceId() throws BookieException { + String instanceId = null; + try { + if (zk.exists(ledgersRootPath, null) == null) { + log.error("BookKeeper metadata doesn't exist in zookeeper. " + + "Has the cluster been initialized? " + + "Try running bin/bookkeeper shell metaformat"); + throw new KeeperException.NoNodeException("BookKeeper metadata"); + } + try { + byte[] data = zk.getData(ledgersRootPath + "/" + + INSTANCEID, false, null); + instanceId = new String(data, UTF_8); + } catch (KeeperException.NoNodeException e) { + log.info("INSTANCEID not exists in zookeeper. 
Not considering it for data verification"); + } + } catch (KeeperException | InterruptedException e) { + throw new MetadataStoreException("Failed to get cluster instance id", e); + } + return instanceId; + } + + @Override + public boolean prepareFormat() throws Exception { + boolean ledgerRootExists = null != zk.exists(ledgersRootPath, false); + boolean availableNodeExists = null != zk.exists(bookieRegistrationPath, false); + // Create ledgers root node if not exists + if (!ledgerRootExists) { + ZkUtils.createFullPathOptimistic(zk, ledgersRootPath, "".getBytes(StandardCharsets.UTF_8), zkAcls, + CreateMode.PERSISTENT); + } + // create available bookies node if not exists + if (!availableNodeExists) { + zk.create(bookieRegistrationPath, "".getBytes(StandardCharsets.UTF_8), zkAcls, CreateMode.PERSISTENT); + } + + // create readonly bookies node if not exists + if (null == zk.exists(bookieReadonlyRegistrationPath, false)) { + zk.create(bookieReadonlyRegistrationPath, new byte[0], zkAcls, CreateMode.PERSISTENT); + } + + return ledgerRootExists; + } + + @Override + public boolean initNewCluster() throws Exception { + String zkServers = ZKMetadataDriverBase.resolveZkServers(conf); + String instanceIdPath = ledgersRootPath + "/" + INSTANCEID; + log.info("Initializing ZooKeeper metadata for new cluster, ZKServers: {} ledger root path: {}", zkServers, + ledgersRootPath); + + boolean ledgerRootExists = null != zk.exists(ledgersRootPath, false); + + if (ledgerRootExists) { + log.error("Ledger root path: {} already exists", ledgersRootPath); + return false; + } + + List multiOps = Lists.newArrayListWithExpectedSize(4); + + // Create ledgers root node + multiOps.add(Op.create(ledgersRootPath, EMPTY_BYTE_ARRAY, zkAcls, CreateMode.PERSISTENT)); + + // create available bookies node + multiOps.add(Op.create(bookieRegistrationPath, EMPTY_BYTE_ARRAY, zkAcls, CreateMode.PERSISTENT)); + + // create readonly bookies node + multiOps.add(Op.create( + bookieReadonlyRegistrationPath, + EMPTY_BYTE_ARRAY, + zkAcls, + CreateMode.PERSISTENT)); + + // create INSTANCEID + String instanceId = UUID.randomUUID().toString(); + multiOps.add(Op.create(instanceIdPath, instanceId.getBytes(UTF_8), + zkAcls, CreateMode.PERSISTENT)); + + // execute the multi ops + zk.multi(multiOps); + + // creates the new layout and stores in zookeeper + AbstractZkLedgerManagerFactory.newLedgerManagerFactory(conf, layoutManager); + + log.info("Successfully initiated cluster. 
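Note: initNewCluster batches the ledgers root, available, readonly, and INSTANCEID creations into one zk.multi() transaction, so a failed initialization cannot leave a half-built ledger tree behind. The transaction shape in isolation; the paths, data, and ACLs below are placeholders:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Op;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MultiOpSketch {
        static void initLedgerTree(ZooKeeper zk, String instanceId)
                throws KeeperException, InterruptedException {
            byte[] empty = new byte[0];
            List<Op> ops = Arrays.asList(
                    Op.create("/ledgers", empty, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT),
                    Op.create("/ledgers/available", empty, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT),
                    Op.create("/ledgers/available/readonly", empty, ZooDefs.Ids.OPEN_ACL_UNSAFE,
                            CreateMode.PERSISTENT),
                    Op.create("/ledgers/INSTANCEID", instanceId.getBytes(StandardCharsets.UTF_8),
                            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT));
            // multi() is all-or-nothing: if any Op fails (e.g. a node already exists),
            // none of the creations are applied.
            zk.multi(ops);
        }
    }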
ZKServers: {} ledger root path: {} instanceId: {}", zkServers, + ledgersRootPath, instanceId); + return true; + } + + @Override + public boolean nukeExistingCluster() throws Exception { + String zkServers = ZKMetadataDriverBase.resolveZkServers(conf); + log.info("Nuking ZooKeeper metadata of existing cluster, ZKServers: {} ledger root path: {}", + zkServers, ledgersRootPath); + + boolean ledgerRootExists = null != zk.exists(ledgersRootPath, false); + if (!ledgerRootExists) { + log.info("There is no existing cluster with ledgersRootPath: {} in ZKServers: {}, " + + "so exiting nuke operation", ledgersRootPath, zkServers); + return true; + } + + boolean availableNodeExists = null != zk.exists(bookieRegistrationPath, false); + try (RegistrationClient regClient = new ZKRegistrationClient( + zk, + ledgersRootPath, + null, + false + )) { + if (availableNodeExists) { + Collection rwBookies = FutureUtils + .result(regClient.getWritableBookies(), EXCEPTION_FUNC).getValue(); + if (rwBookies != null && !rwBookies.isEmpty()) { + log.error("Bookies are still up and connected to this cluster, " + + "stop all bookies before nuking the cluster"); + return false; + } + + boolean readonlyNodeExists = null != zk.exists(bookieReadonlyRegistrationPath, false); + if (readonlyNodeExists) { + Collection roBookies = FutureUtils + .result(regClient.getReadOnlyBookies(), EXCEPTION_FUNC).getValue(); + if (roBookies != null && !roBookies.isEmpty()) { + log.error("Readonly Bookies are still up and connected to this cluster, " + + "stop all bookies before nuking the cluster"); + return false; + } + } + } + } + + LedgerManagerFactory ledgerManagerFactory = + AbstractZkLedgerManagerFactory.newLedgerManagerFactory(conf, layoutManager); + return ledgerManagerFactory.validateAndNukeExistingCluster(conf, layoutManager); + } + + @Override + public boolean format() throws Exception { + // Clear underreplicated ledgers + try { + ZKUtil.deleteRecursive(zk, ZkLedgerUnderreplicationManager.getBasePath(ledgersRootPath) + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH); + } catch (KeeperException.NoNodeException e) { + if (log.isDebugEnabled()) { + log.debug("underreplicated ledgers root path node not exists in zookeeper to delete"); + } + } + + // Clear underreplicatedledger locks + try { + ZKUtil.deleteRecursive(zk, ZkLedgerUnderreplicationManager.getBasePath(ledgersRootPath) + '/' + + BookKeeperConstants.UNDER_REPLICATION_LOCK); + } catch (KeeperException.NoNodeException e) { + if (log.isDebugEnabled()) { + log.debug("underreplicatedledger locks node not exists in zookeeper to delete"); + } + } + + // Clear the cookies + try { + ZKUtil.deleteRecursive(zk, cookiePath); + } catch (KeeperException.NoNodeException e) { + if (log.isDebugEnabled()) { + log.debug("cookies node not exists in zookeeper to delete"); + } + } + + // Clear the INSTANCEID + try { + zk.delete(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, -1); + } catch (KeeperException.NoNodeException e) { + if (log.isDebugEnabled()) { + log.debug("INSTANCEID not exists in zookeeper to delete"); + } + } + + // create INSTANCEID + String instanceId = UUID.randomUUID().toString(); + zk.create(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, + instanceId.getBytes(StandardCharsets.UTF_8), zkAcls, CreateMode.PERSISTENT); + + log.info("Successfully formatted BookKeeper metadata"); + return true; + } + + @Override + public boolean isBookieRegistered(BookieId bookieId) throws BookieException { + String regPath = bookieRegistrationPath + "/" + bookieId; + String 
readonlyRegPath = bookieReadonlyRegistrationPath + "/" + bookieId; + try { + return ((null != zk.exists(regPath, false)) || (null != zk.exists(readonlyRegPath, false))); + } catch (KeeperException e) { + log.error("ZK exception while checking registration ephemeral znodes for BookieId: {}", bookieId, e); + throw new MetadataStoreException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.error("InterruptedException while checking registration ephemeral znodes for BookieId: {}", bookieId, + e); + throw new MetadataStoreException(e); + } + } + + @Override + public void addRegistrationListener(RegistrationListener listener) { + listeners.add(listener); + } + + public void betweenRegisterReadOnlyBookie(Function fn) { + hookOnRegisterReadOnly = fn; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java index 0df325b3c57a0..0e9c781fb9143 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java @@ -23,12 +23,14 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.protobuf.TextFormat; +import java.lang.reflect.Field; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -38,6 +40,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import lombok.Cleanup; @@ -54,6 +57,7 @@ import org.apache.bookkeeper.util.BookKeeperConstants; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.metadata.BaseMetadataStoreTest; +import org.apache.pulsar.metadata.api.GetResult; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; @@ -296,6 +300,69 @@ public void testMarkingAsReplicated(String provider, Supplier urlSupplie assertEquals(l, lB.get(), "Should be the ledger I marked"); } + + @Test(timeOut = 10000) + public void testZkMetasStoreMarkReplicatedDeleteEmptyParentNodes() throws Exception { + methodSetup(stringSupplier(() -> zks.getConnectionString())); + + String missingReplica = "localhost:3181"; + + @Cleanup + LedgerUnderreplicationManager m1 = lmf.newLedgerUnderreplicationManager(); + + Long ledgerA = 0xfeadeefdacL; + m1.markLedgerUnderreplicated(ledgerA, missingReplica); + + Field storeField = m1.getClass().getDeclaredField("store"); + storeField.setAccessible(true); + MetadataStoreExtended metadataStore = (MetadataStoreExtended) storeField.get(m1); + + String fiveLevelPath = PulsarLedgerUnderreplicationManager.getUrLedgerPath(urLedgerPath, ledgerA); + Optional getResult = metadataStore.get(fiveLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String fourLevelPath = fiveLevelPath.substring(0, fiveLevelPath.lastIndexOf("/")); + getResult = 
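Note: the new testZkMetasStoreMarkReplicatedDeleteEmptyParentNodes walks every ancestor of the per-ledger underreplication znode by repeatedly trimming the path at its last '/', then checks that markLedgerReplicated() removed the now-empty intermediate nodes. The path decomposition on its own; the example path is made up:

    import java.util.ArrayList;
    import java.util.List;

    public class AncestorPathsSketch {
        /** Lists every proper ancestor of a znode path, nearest parent first, stopping below "/". */
        static List<String> ancestors(String path) {
            List<String> result = new ArrayList<>();
            String current = path;
            while (current.lastIndexOf('/') > 0) {
                current = current.substring(0, current.lastIndexOf('/'));
                result.add(current);
            }
            return result;
        }

        public static void main(String[] args) {
            // [/ur/ledgers/0000/00fe/adee, /ur/ledgers/0000/00fe, /ur/ledgers/0000, /ur/ledgers, /ur]
            System.out.println(ancestors("/ur/ledgers/0000/00fe/adee/urL12345"));
        }
    }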
metadataStore.get(fourLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String threeLevelPath = fourLevelPath.substring(0, fourLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(threeLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String twoLevelPath = fourLevelPath.substring(0, threeLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(twoLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String oneLevelPath = fourLevelPath.substring(0, twoLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(oneLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + getResult = metadataStore.get(urLedgerPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + long ledgerToRereplicate = m1.getLedgerToRereplicate(); + assertEquals(ledgerToRereplicate, ledgerA); + m1.markLedgerReplicated(ledgerA); + + getResult = metadataStore.get(fiveLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(fourLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(threeLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(twoLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(oneLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(urLedgerPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + } + /** * Test releasing of a ledger * A ledger is released when a client decides it does not want @@ -548,6 +615,8 @@ public void testDisableLedgerReplication(String provider, Supplier urlSu final String missingReplica = "localhost:3181"; // disabling replication + AtomicInteger callbackCount = new AtomicInteger(); + lum.notifyLedgerReplicationEnabled((rc, result) -> callbackCount.incrementAndGet()); lum.disableLedgerReplication(); log.info("Disabled Ledeger Replication"); @@ -565,6 +634,7 @@ public void testDisableLedgerReplication(String provider, Supplier urlSu } catch (TimeoutException te) { // expected behaviour, as the replication is disabled } + assertEquals(callbackCount.get(), 1, "Notify callback times mismatch"); } /** @@ -585,7 +655,8 @@ public void testEnableLedgerReplication(String provider, Supplier urlSup log.debug("Unexpected exception while marking urLedger", e); fail("Unexpected exception while marking urLedger" + e.getMessage()); } - + AtomicInteger callbackCount = new AtomicInteger(); + lum.notifyLedgerReplicationEnabled((rc, result) -> callbackCount.incrementAndGet()); // disabling replication lum.disableLedgerReplication(); log.debug("Disabled Ledeger Replication"); @@ -622,6 +693,7 @@ public void testEnableLedgerReplication(String provider, Supplier urlSup znodeLatch.await(5, TimeUnit.SECONDS); log.debug("Enabled Ledeger Replication"); assertEquals(znodeLatch.getCount(), 0, "Failed to disable ledger replication!"); + assertEquals(callbackCount.get(), 2, "Notify callback times mismatch"); } finally { thread1.interrupt(); } @@ -683,6 +755,17 @@ public void testReplicasCheckCTime(String provider, Supplier urlSupplier assertEquals(underReplicaMgr1.getReplicasCheckCTime(), curTime); } + @Test(timeOut = 60000, dataProvider = "impl") + public void testLostBookieRecoveryDelay(String provider, Supplier urlSupplier) throws Exception { + methodSetup(urlSupplier); + + AtomicInteger 
callbackCount = new AtomicInteger(); + lum.notifyLostBookieRecoveryDelayChanged((rc, result) -> callbackCount.incrementAndGet()); + // disabling replication + lum.setLostBookieRecoveryDelay(10); + Awaitility.await().until(() -> callbackCount.get() == 2); + } + private void verifyMarkLedgerUnderreplicated(Collection missingReplica) throws Exception { Long ledgerA = 0xfeadeefdacL; String znodeA = getUrLedgerZnode(ledgerA); diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClientTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClientTest.java index f599451c00710..4dcbcda3d9078 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClientTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationClientTest.java @@ -42,9 +42,8 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.conf.AbstractConfiguration; -import org.apache.bookkeeper.discover.BookieServiceInfo; -import org.apache.bookkeeper.discover.RegistrationClient; -import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.*; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.versioning.Version; @@ -52,6 +51,7 @@ import org.apache.pulsar.metadata.BaseMetadataStoreTest; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.zookeeper.ZooKeeper; import org.awaitility.Awaitility; import org.testng.annotations.Test; @@ -126,7 +126,7 @@ public void testGetReadonlyBookies(String provider, Supplier urlSupplier public void testGetBookieServiceInfo(String provider, Supplier urlSupplier) throws Exception { @Cleanup MetadataStoreExtended store = MetadataStoreExtended.create(urlSupplier.get(), - MetadataStoreConfig.builder().fsyncEnable(false).build()); + MetadataStoreConfig.builder().fsyncEnable(false).build()); String ledgersRoot = "/test/ledgers-" + UUID.randomUUID(); @@ -168,10 +168,10 @@ public void testGetBookieServiceInfo(String provider, Supplier urlSuppli getAndVerifyAllBookies(rc, addresses); Awaitility.await().untilAsserted(() -> { - for (BookieId address : addresses) { - BookieServiceInfo bookieServiceInfo = rc.getBookieServiceInfo(address).get().getValue(); - compareBookieServiceInfo(bookieServiceInfo, bookieServiceInfos.get(address)); - }}); + for (BookieId address : addresses) { + BookieServiceInfo bookieServiceInfo = rc.getBookieServiceInfo(address).get().getValue(); + compareBookieServiceInfo(bookieServiceInfo, bookieServiceInfos.get(address)); + }}); // shutdown the bookies (but keep the cookie) for (BookieId address : addresses) { @@ -184,12 +184,12 @@ public void testGetBookieServiceInfo(String provider, Supplier urlSuppli // getBookieServiceInfo should fail with BKBookieHandleNotAvailableException Awaitility.await().untilAsserted(() -> { - for (BookieId address : addresses) { - assertTrue( - expectThrows(ExecutionException.class, () -> { - rc.getBookieServiceInfo(address).get(); - }).getCause() instanceof BKException.BKBookieHandleNotAvailableException); - }}); + for (BookieId address : addresses) { + assertTrue( + expectThrows(ExecutionException.class, () -> { + rc.getBookieServiceInfo(address).get(); + }).getCause() instanceof 
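Note: testLostBookieRecoveryDelay and the replication enable/disable tests count listener invocations with an AtomicInteger and wait for the expected count with Awaitility instead of a fixed sleep, so the assertion is fast when the callback fires early and tolerant when it fires late. The idiom on its own:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.awaitility.Awaitility;

    public class AwaitCallbackSketch {
        public static void main(String[] args) {
            AtomicInteger callbackCount = new AtomicInteger();

            // Stand-in for an asynchronous notification arriving a little later.
            CompletableFuture.runAsync(() -> {
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                callbackCount.incrementAndGet();
            });

            // Poll until the condition holds, failing after the configured timeout.
            Awaitility.await()
                    .atMost(5, TimeUnit.SECONDS)
                    .until(() -> callbackCount.get() == 1);
        }
    }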
BKException.BKBookieHandleNotAvailableException); + }}); // restart the bookies, all writable @@ -241,12 +241,12 @@ public void testGetBookieServiceInfo(String provider, Supplier urlSuppli .await() .ignoreExceptionsMatching(e -> e.getCause() instanceof BKException.BKBookieHandleNotAvailableException) .untilAsserted(() -> { - // verify that infos are updated - for (BookieId address : addresses) { - BookieServiceInfo bookieServiceInfo = rc.getBookieServiceInfo(address).get().getValue(); - compareBookieServiceInfo(bookieServiceInfo, bookieServiceInfos.get(address)); - } - }); + // verify that infos are updated + for (BookieId address : addresses) { + BookieServiceInfo bookieServiceInfo = rc.getBookieServiceInfo(address).get().getValue(); + compareBookieServiceInfo(bookieServiceInfo, bookieServiceInfos.get(address)); + } + }); } @@ -318,7 +318,7 @@ private void testWatchBookiesSuccess(String provider, Supplier urlSuppli @Cleanup MetadataStoreExtended store = MetadataStoreExtended.create(urlSupplier.get(), - MetadataStoreConfig.builder().fsyncEnable(false).build()); + MetadataStoreConfig.builder().fsyncEnable(false).build()); String ledgersRoot = "/test/ledgers-" + UUID.randomUUID(); @@ -357,4 +357,88 @@ private void testWatchBookiesSuccess(String provider, Supplier urlSuppli }); } + + @Test + public void testNetworkDelayWithBkZkManager() throws Throwable { + final String zksConnectionString = zks.getConnectionString(); + final String ledgersRoot = "/test/ledgers-" + UUID.randomUUID(); + // prepare registration manager + ZooKeeper zk = new ZooKeeper(zksConnectionString, 5000, null); + final ServerConfiguration serverConfiguration = new ServerConfiguration(); + serverConfiguration.setZkLedgersRootPath(ledgersRoot); + final FaultInjectableZKRegistrationManager rm = new FaultInjectableZKRegistrationManager(serverConfiguration, zk); + rm.prepareFormat(); + // prepare registration client + @Cleanup + MetadataStoreExtended store = MetadataStoreExtended.create(zksConnectionString, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + @Cleanup + RegistrationClient rc1 = new PulsarRegistrationClient(store, ledgersRoot); + @Cleanup + RegistrationClient rc2 = new PulsarRegistrationClient(store, ledgersRoot); + + final List addresses = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + addresses.add(BookieId.parse("BOOKIE-" + i)); + } + final Map bookieServiceInfos = new HashMap<>(); + + int port = 223; + for (BookieId address : addresses) { + BookieServiceInfo info = new BookieServiceInfo(); + BookieServiceInfo.Endpoint endpoint = new BookieServiceInfo.Endpoint(); + endpoint.setAuth(Collections.emptyList()); + endpoint.setExtensions(Collections.emptyList()); + endpoint.setId("id"); + endpoint.setHost("localhost"); + endpoint.setPort(port++); + endpoint.setProtocol("bookie-rpc"); + info.setEndpoints(Arrays.asList(endpoint)); + bookieServiceInfos.put(address, info); + rm.registerBookie(address, false, info); + // write the cookie + rm.writeCookie(address, new Versioned<>(new byte[0], Version.NEW)); + } + + // trigger loading the BookieServiceInfo in the local cache + getAndVerifyAllBookies(rc1, addresses); + getAndVerifyAllBookies(rc2, addresses); + + Awaitility.await().untilAsserted(() -> { + for (BookieId address : addresses) { + compareBookieServiceInfo(rc1.getBookieServiceInfo(address).get().getValue(), + bookieServiceInfos.get(address)); + compareBookieServiceInfo(rc2.getBookieServiceInfo(address).get().getValue(), + bookieServiceInfos.get(address)); + } + }); + + // verified the init 
status. + + + // mock network delay + rm.betweenRegisterReadOnlyBookie(__ -> { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return null; + }); + + for (int i = 0; i < addresses.size() / 2; i++) { + final BookieId bkId = addresses.get(i); + // turn some bookies to be read only. + rm.registerBookie(bkId, true, bookieServiceInfos.get(bkId)); + } + + Awaitility.await().untilAsserted(() -> { + for (BookieId address : addresses) { + compareBookieServiceInfo(rc1.getBookieServiceInfo(address).get().getValue(), + bookieServiceInfos.get(address)); + compareBookieServiceInfo(rc2.getBookieServiceInfo(address).get().getValue(), + bookieServiceInfos.get(address)); + } + }); + } } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImplTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImplTest.java index 027521d2ffc17..09c9d71c41a30 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImplTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImplTest.java @@ -29,7 +29,7 @@ public class LeaderElectionImplTest extends BaseMetadataStoreTest { - @Test(dataProvider = "impl", timeOut = 10000) + @Test(dataProvider = "impl", timeOut = 20000) public void validateDeadLock(String provider, Supplier urlSupplier) throws Exception { if (provider.equals("Memory") || provider.equals("RocksDB")) { diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/impl/EtcdMetadataStoreTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/impl/EtcdMetadataStoreTest.java index f1a1b5626acce..bdcd0614d0375 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/impl/EtcdMetadataStoreTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/impl/EtcdMetadataStoreTest.java @@ -23,8 +23,8 @@ import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import com.google.common.io.Resources; import io.etcd.jetcd.launcher.EtcdCluster; -import io.etcd.jetcd.launcher.EtcdClusterFactory; +import io.etcd.jetcd.test.EtcdClusterExtension; import java.net.URI; import java.nio.charset.StandardCharsets; import java.nio.file.Files; @@ -44,7 +44,8 @@ public class EtcdMetadataStoreTest { @Test public void testCluster() throws Exception { @Cleanup - EtcdCluster etcdCluster = EtcdClusterFactory.buildCluster("test-cluster", 3, false); + EtcdCluster etcdCluster = EtcdClusterExtension.builder().withClusterName("test-cluster").withNodes(3) + .withSsl(false).build().cluster(); etcdCluster.start(); EtcdConfig etcdConfig = EtcdConfig.builder().useTls(false) @@ -56,7 +57,7 @@ public void testCluster() throws Exception { new ObjectMapper(new YAMLFactory()).writeValue(etcdConfigPath.toFile(), etcdConfig); String metadataURL = - "etcd:" + etcdCluster.getClientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); + "etcd:" + etcdCluster.clientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); @Cleanup MetadataStore store = MetadataStoreFactory.create(metadataURL, @@ -71,7 +72,8 @@ public void testCluster() throws Exception { @Test public void testClusterWithTls() throws Exception { @Cleanup - EtcdCluster etcdCluster = EtcdClusterFactory.buildCluster("test-cluster", 3, true); + EtcdCluster etcdCluster = EtcdClusterExtension.builder().withClusterName("test-cluster").withNodes(3) + .withSsl(true).build().cluster(); 
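Note: EtcdMetadataStoreTest swaps the removed jetcd-launcher EtcdClusterFactory for EtcdClusterExtension from jetcd-test; the builder yields an equivalent test cluster, and clientEndpoints() replaces getClientEndpoints(). The new call sequence condensed into one snippet, building the etcd: metadata URL the same way these tests do:

    import java.net.URI;
    import java.util.stream.Collectors;
    import io.etcd.jetcd.launcher.EtcdCluster;
    import io.etcd.jetcd.test.EtcdClusterExtension;

    public class EtcdClusterSketch {
        public static void main(String[] args) {
            EtcdCluster etcdCluster = EtcdClusterExtension.builder()
                    .withClusterName("test-cluster")
                    .withNodes(3)
                    .withSsl(false)
                    .build()
                    .cluster();
            etcdCluster.start();
            try {
                // Same joining logic as the tests: "etcd:" plus a comma-separated endpoint list.
                String metadataUrl = "etcd:" + etcdCluster.clientEndpoints().stream()
                        .map(URI::toString)
                        .collect(Collectors.joining(","));
                System.out.println(metadataUrl);
            } finally {
                etcdCluster.close();
            }
        }
    }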
etcdCluster.start(); EtcdConfig etcdConfig = EtcdConfig.builder().useTls(true) @@ -86,7 +88,7 @@ public void testClusterWithTls() throws Exception { new ObjectMapper(new YAMLFactory()).writeValue(etcdConfigPath.toFile(), etcdConfig); String metadataURL = - "etcd:" + etcdCluster.getClientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); + "etcd:" + etcdCluster.clientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); @Cleanup MetadataStore store = MetadataStoreFactory.create(metadataURL, @@ -101,7 +103,8 @@ public void testClusterWithTls() throws Exception { @Test public void testTlsInstance() throws Exception { @Cleanup - EtcdCluster etcdCluster = EtcdClusterFactory.buildCluster("test-tls", 1, true); + EtcdCluster etcdCluster = EtcdClusterExtension.builder().withClusterName("test-tls").withNodes(1) + .withSsl(true).build().cluster(); etcdCluster.start(); EtcdConfig etcdConfig = EtcdConfig.builder().useTls(true) @@ -115,7 +118,7 @@ public void testTlsInstance() throws Exception { new ObjectMapper(new YAMLFactory()).writeValue(etcdConfigPath.toFile(), etcdConfig); String metadataURL = - "etcd:" + etcdCluster.getClientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); + "etcd:" + etcdCluster.clientEndpoints().stream().map(URI::toString).collect(Collectors.joining(",")); @Cleanup MetadataStore store = MetadataStoreFactory.create(metadataURL, diff --git a/pulsar-metadata/src/test/resources/findbugsExclude.xml b/pulsar-metadata/src/test/resources/findbugsExclude.xml index e40b2a0fd9a12..bf075c39791cf 100644 --- a/pulsar-metadata/src/test/resources/findbugsExclude.xml +++ b/pulsar-metadata/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - \ No newline at end of file + + + + + + + + + diff --git a/pulsar-package-management/bookkeeper-storage/pom.xml b/pulsar-package-management/bookkeeper-storage/pom.xml index 274e5abb4abec..17ee17d52c773 100644 --- a/pulsar-package-management/bookkeeper-storage/pom.xml +++ b/pulsar-package-management/bookkeeper-storage/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar-package-management - org.apache.pulsar - 3.0.0-SNAPSHOT - - 4.0.0 - - pulsar-package-bookkeeper-storage - Apache Pulsar :: Package Management :: BookKeeper Storage - - - - ${project.groupId} - pulsar-package-core - ${project.version} - - - - org.apache.distributedlog - distributedlog-core - - - net.jpountz.lz4 - lz4 - - - - - - org.apache.zookeeper - zookeeper - tests - test - - - ch.qos.logback - logback-core - - - ch.qos.logback - logback-classic - - - io.netty - netty-tcnative - - - - - - - io.dropwizard.metrics - metrics-core - test - - - - - org.xerial.snappy - snappy-java - test - - - - ${project.groupId} - managed-ledger - ${project.version} - test - - - - ${project.groupId} - testmocks - ${project.version} - - - + + + pulsar-package-management + io.streamnative + 3.0.0-SNAPSHOT + + 4.0.0 + pulsar-package-bookkeeper-storage + Apache Pulsar :: Package Management :: BookKeeper Storage + + + ${project.groupId} + pulsar-package-core + ${project.version} + + + org.apache.distributedlog + distributedlog-core + + + net.jpountz.lz4 + lz4 + + + + + org.apache.zookeeper + zookeeper + tests + test + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + + io.netty + netty-tcnative + + + + + org.hamcrest + hamcrest + test + + + + io.dropwizard.metrics + metrics-core + test + + + + org.xerial.snappy + snappy-java + test + + + ${project.groupId} + managed-ledger + ${project.version} + test + + + 
${project.groupId} + testmocks + ${project.version} + + diff --git a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java index 40c2041d4e6c4..43db5ad4ba845 100644 --- a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java +++ b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java @@ -70,7 +70,6 @@ import org.apache.bookkeeper.meta.LedgerManager; import org.apache.bookkeeper.meta.LedgerManagerFactory; import org.apache.bookkeeper.meta.MetadataBookieDriver; -import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; import org.apache.bookkeeper.metastore.InMemoryMetaStore; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; @@ -486,7 +485,7 @@ public ServerConfiguration killBookie(int index) throws Exception { public ServerConfiguration killBookieAndWaitForZK(int index) throws Exception { ServerTester tester = servers.get(index); // IKTODO: this method is awful ServerConfiguration ret = killBookie(index); - while (zkc.exists(ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + "/" + AVAILABLE_NODE + "/" + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + tester.getServer().getBookieId().toString(), false) != null) { Thread.sleep(500); } diff --git a/pulsar-package-management/core/pom.xml b/pulsar-package-management/core/pom.xml index a54d2b1d8ad54..f307b22a42f3d 100644 --- a/pulsar-package-management/core/pom.xml +++ b/pulsar-package-management/core/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar-package-management - org.apache.pulsar - 3.0.0-SNAPSHOT - - 4.0.0 - - pulsar-package-core - Apache Pulsar :: Package Management :: Core - - - - ${project.groupId} - pulsar-client-admin-api - ${project.parent.version} - - - - com.google.guava - guava - - - - org.apache.commons - commons-lang3 - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 17 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - + + + pulsar-package-management + io.streamnative + 3.0.0-SNAPSHOT + + 4.0.0 + pulsar-package-core + Apache Pulsar :: Package Management :: Core + + + ${project.groupId} + pulsar-client-admin-api + ${project.parent.version} + + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 17 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + diff --git a/pulsar-package-management/core/src/main/resources/findbugsExclude.xml b/pulsar-package-management/core/src/main/resources/findbugsExclude.xml index 3a2e998dce984..bd8d2c4f77efa 100644 --- 
a/pulsar-package-management/core/src/main/resources/findbugsExclude.xml +++ b/pulsar-package-management/core/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - pulsar-package-management - org.apache.pulsar - 3.0.0-SNAPSHOT - - 4.0.0 - - pulsar-package-filesystem-storage - Apache Pulsar :: Package Management :: Filesystem Storage - - - - ${project.groupId} - pulsar-package-core - ${project.parent.version} - - - - com.google.guava - guava - - - - ${project.groupId} - testmocks - ${project.parent.version} - test - - - + + + pulsar-package-management + io.streamnative + 3.0.0-SNAPSHOT + + 4.0.0 + pulsar-package-filesystem-storage + Apache Pulsar :: Package Management :: Filesystem Storage + + + ${project.groupId} + pulsar-package-core + ${project.parent.version} + + + com.google.guava + guava + + + ${project.groupId} + testmocks + ${project.parent.version} + test + + diff --git a/pulsar-package-management/pom.xml b/pulsar-package-management/pom.xml index f05798ecb4bb3..a2f02a5d2c1e8 100644 --- a/pulsar-package-management/pom.xml +++ b/pulsar-package-management/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar - org.apache.pulsar - 3.0.0-SNAPSHOT - .. - - 4.0.0 - - pulsar-package-management - pom - Apache Pulsar :: Package Management - - core - bookkeeper-storage - filesystem-storage - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - spotbugs - verify - - check - - - - - - + + + pulsar + io.streamnative + 3.0.0-SNAPSHOT + .. + + 4.0.0 + pulsar-package-management + pom + Apache Pulsar :: Package Management + + core + bookkeeper-storage + filesystem-storage + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-proxy/pom.xml b/pulsar-proxy/pom.xml index b9a68cba2c5e1..6cef823ccaa82 100644 --- a/pulsar-proxy/pom.xml +++ b/pulsar-proxy/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.0.0-SNAPSHOT - pulsar-proxy Pulsar Proxy - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-websocket ${project.version} - org.apache.commons commons-lang3 - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.eclipse.jetty jetty-proxy - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.media jersey-media-json-jackson - com.google.guava guava - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - javax.xml.bind jaxb-api - com.sun.activation javax.activation - io.prometheus simpleclient - io.prometheus simpleclient_hotspot - io.prometheus simpleclient_servlet - io.prometheus simpleclient_jetty - ${project.groupId} pulsar-broker ${project.version} test - ${project.groupId} testmocks ${project.version} test - org.awaitility awaitility test - com.beust jcommander - org.apache.logging.log4j log4j-core - com.github.seancfoley ipaddress @@ -185,6 +156,12 @@ testcontainers test + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + @@ -277,5 +254,4 @@ - diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java index 
75059fc0d2551..95a3bf032fe21 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -49,6 +50,9 @@ public class ProxyExtensions implements AutoCloseable { * @return the collection of extensions */ public static ProxyExtensions load(ProxyConfiguration conf) throws IOException { + if (conf.getProxyExtensions().isEmpty()) { + return new ProxyExtensions(Collections.emptyMap()); + } ExtensionsDefinitions definitions = ProxyExtensionsUtils.searchForExtensions( conf.getProxyExtensionsDirectory(), conf.getNarExtractionDirectory()); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java index d9dda9823ea89..c528ceb2cf5b7 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java @@ -41,8 +41,10 @@ import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; import org.apache.pulsar.client.api.AuthenticationFactory; +import org.apache.pulsar.client.api.KeyStoreParams; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; import org.apache.pulsar.policies.data.loadbalancer.ServiceLookupData; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.client.HttpRequest; @@ -269,20 +271,35 @@ protected HttpClient newHttpClient() { SSLContext sslCtx; AuthenticationDataProvider authData = auth.getAuthData(); - if (authData.hasDataForTls()) { - sslCtx = SecurityUtility.createSslContext( + if (config.isBrokerClientTlsEnabledWithKeyStore()) { + KeyStoreParams params = authData.hasDataForTls() ? authData.getTlsKeyStoreParams() : null; + sslCtx = KeyStoreSSLContext.createClientSslContext( + config.getBrokerClientSslProvider(), + params != null ? params.getKeyStoreType() : null, + params != null ? params.getKeyStorePath() : null, + params != null ? 
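Note: ProxyExtensions.load now returns an empty extension set immediately when proxyExtensions is not configured, so the proxy no longer scans, or fails on, an extensions directory it will never use. The guard-clause shape with stand-in types (the real method works on ProxyConfiguration and NAR archives):

    import java.nio.file.Path;
    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    public class ExtensionsLoaderSketch {
        // Stand-in for the real ProxyExtensions result type.
        static final class LoadedExtensions {
            final Map<String, Object> byName;
            LoadedExtensions(Map<String, Object> byName) {
                this.byName = byName;
            }
        }

        static LoadedExtensions load(Set<String> configuredExtensions, Path extensionsDirectory) {
            // Guard clause: nothing configured means nothing to scan, so a missing or
            // unreadable extensions directory can no longer break proxy startup.
            if (configuredExtensions.isEmpty()) {
                return new LoadedExtensions(Collections.emptyMap());
            }
            // ... otherwise scan extensionsDirectory and load the requested archives ...
            return new LoadedExtensions(Collections.emptyMap());
        }

        public static void main(String[] args) {
            System.out.println(load(Set.of(), Path.of("/tmp/does-not-exist")).byName);
        }
    }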
params.getKeyStorePassword() : null, config.isTlsAllowInsecureConnection(), - trustCertificates, - authData.getTlsCertificates(), - authData.getTlsPrivateKey(), - config.getBrokerClientSslProvider() - ); + config.getBrokerClientTlsTrustStoreType(), + config.getBrokerClientTlsTrustStore(), + config.getBrokerClientTlsTrustStorePassword(), + config.getBrokerClientTlsCiphers(), + config.getBrokerClientTlsProtocols()); } else { - sslCtx = SecurityUtility.createSslContext( - config.isTlsAllowInsecureConnection(), - trustCertificates, - config.getBrokerClientSslProvider() - ); + if (authData.hasDataForTls()) { + sslCtx = SecurityUtility.createSslContext( + config.isTlsAllowInsecureConnection(), + trustCertificates, + authData.getTlsCertificates(), + authData.getTlsPrivateKey(), + config.getBrokerClientSslProvider() + ); + } else { + sslCtx = SecurityUtility.createSslContext( + config.isTlsAllowInsecureConnection(), + trustCertificates, + config.getBrokerClientSslProvider() + ); + } } SslContextFactory contextFactory = new SslContextFactory.Client(); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java index b62b3bacf0114..f76adadcc3e0a 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java @@ -116,7 +116,7 @@ public void handleLookup(CommandLookupTopic lookup) { log.debug("Lookup Request ID {} from {} rejected - {}.", clientRequestId, clientAddress, throttlingErrorMessage); } - writeAndFlush(Commands.newLookupErrorResponse(ServerError.ServiceNotReady, + writeAndFlush(Commands.newLookupErrorResponse(ServerError.TooManyRequests, throttlingErrorMessage, clientRequestId)); } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java index 3ecd670cbbf7a..db2969e3c3920 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java @@ -173,35 +173,43 @@ public class ProxyConfiguration implements PulsarConfiguration { @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The service url points to the broker cluster. URL must have the pulsar:// prefix." + doc = "If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the" + + " discovery service provider." + + " URL must have the pulsar:// prefix. And does not support multi url yet." ) private String brokerServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls service url points to the broker cluster. URL must have the pulsar+ssl:// prefix." + doc = "If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the" + + " discovery service provider." + + " URL must have the pulsar+ssl:// prefix. And does not support multi url yet." ) private String brokerServiceURLTLS; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The web service url points to the broker cluster" + doc = "The web service url points to the discovery service provider of the broker cluster, and does not support" + + " multi url yet." 
) private String brokerWebServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls web service url points to the broker cluster" + doc = "The tls web service url points to the discovery service provider of the broker cluster, and does not" + + " support multi url yet." ) private String brokerWebServiceURLTLS; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The web service url points to the function worker cluster." + doc = "The web service url points to the discovery service provider of the function worker cluster, and does" + + " not support multi url yet." + " Only configure it when you setup function workers in a separate cluster" ) private String functionWorkerWebServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls web service url points to the function worker cluster." + doc = "The tls web service url points to the discovery service provider of the function worker cluster, and" + + " does not support multi url yet." + " Only configure it when you setup function workers in a separate cluster" ) private String functionWorkerWebServiceURLTLS; @@ -371,6 +379,12 @@ public class ProxyConfiguration implements PulsarConfiguration { ) private int authenticationRefreshCheckSeconds = 60; + @FieldContext( + category = CATEGORY_HTTP, + doc = "Whether to enable the proxy's /metrics and /proxy-stats http endpoints" + ) + private boolean enableProxyStatsEndpoints = true; + @FieldContext( category = CATEGORY_AUTHENTICATION, doc = "Whether the '/metrics' endpoint requires authentication. Defaults to true." diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java index beee9f1a4f763..0cea5e9d18439 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java @@ -51,7 +51,6 @@ import org.apache.pulsar.common.util.ShutdownUtil; import org.apache.pulsar.proxy.stats.ProxyStats; import org.apache.pulsar.websocket.WebSocketConsumerServlet; -import org.apache.pulsar.websocket.WebSocketPingPongServlet; import org.apache.pulsar.websocket.WebSocketProducerServlet; import org.apache.pulsar.websocket.WebSocketReaderServlet; import org.apache.pulsar.websocket.WebSocketService; @@ -162,11 +161,28 @@ public ProxyServiceStarter(String[] args) throws Exception { if (isNotBlank(config.getBrokerServiceURL())) { checkArgument(config.getBrokerServiceURL().startsWith("pulsar://"), "brokerServiceURL must start with pulsar://"); + ensureUrlNotContainsComma("brokerServiceURL", config.getBrokerServiceURL()); } - if (isNotBlank(config.getBrokerServiceURLTLS())) { checkArgument(config.getBrokerServiceURLTLS().startsWith("pulsar+ssl://"), "brokerServiceURLTLS must start with pulsar+ssl://"); + ensureUrlNotContainsComma("brokerServiceURLTLS", config.getBrokerServiceURLTLS()); + } + + if (isNotBlank(config.getBrokerWebServiceURL())) { + ensureUrlNotContainsComma("brokerWebServiceURL", config.getBrokerWebServiceURL()); + } + if (isNotBlank(config.getBrokerWebServiceURLTLS())) { + ensureUrlNotContainsComma("brokerWebServiceURLTLS", config.getBrokerWebServiceURLTLS()); + } + + if (isNotBlank(config.getFunctionWorkerWebServiceURL())) { + ensureUrlNotContainsComma("functionWorkerWebServiceURLTLS", + config.getFunctionWorkerWebServiceURL()); + } + if (isNotBlank(config.getFunctionWorkerWebServiceURLTLS())) { + 
ensureUrlNotContainsComma("functionWorkerWebServiceURLTLS", + config.getFunctionWorkerWebServiceURLTLS()); } if ((isBlank(config.getBrokerServiceURL()) && isBlank(config.getBrokerServiceURLTLS())) @@ -187,6 +203,11 @@ public ProxyServiceStarter(String[] args) throws Exception { } } + private void ensureUrlNotContainsComma(String paramName, String paramValue) { + checkArgument(!paramValue.contains(","), paramName + " does not support multi urls yet," + + " it should point to the discovery service provider."); + } + public static void main(String[] args) throws Exception { ProxyServiceStarter serviceStarter = new ProxyServiceStarter(args); try { @@ -256,15 +277,21 @@ public static void addWebServerHandlers(WebServer server, ProxyConfiguration config, ProxyService service, BrokerDiscoveryProvider discoveryProvider) throws Exception { - if (service != null) { - PrometheusMetricsServlet metricsServlet = service.getMetricsServlet(); - if (metricsServlet != null) { - server.addServlet("/metrics", new ServletHolder(metricsServlet), - Collections.emptyList(), config.isAuthenticateMetricsEndpoint()); + // We can make 'status.html' publicly accessible without authentication since + // it does not contain any sensitive data. + server.addRestResource("/", VipStatus.ATTRIBUTE_STATUS_FILE_PATH, config.getStatusFilePath(), + VipStatus.class, false); + if (config.isEnableProxyStatsEndpoints()) { + server.addRestResource("/proxy-stats", ProxyStats.ATTRIBUTE_PULSAR_PROXY_NAME, service, + ProxyStats.class); + if (service != null) { + PrometheusMetricsServlet metricsServlet = service.getMetricsServlet(); + if (metricsServlet != null) { + server.addServlet("/metrics", new ServletHolder(metricsServlet), + Collections.emptyList(), config.isAuthenticateMetricsEndpoint()); + } } } - server.addRestResource("/", VipStatus.ATTRIBUTE_STATUS_FILE_PATH, config.getStatusFilePath(), VipStatus.class); - server.addRestResource("/proxy-stats", ProxyStats.ATTRIBUTE_PULSAR_PROXY_NAME, service, ProxyStats.class); AdminProxyHandler adminProxyHandler = new AdminProxyHandler(config, discoveryProvider); ServletHolder servletHolder = new ServletHolder(adminProxyHandler); @@ -315,12 +342,6 @@ public static void addWebServerHandlers(WebServer server, new ServletHolder(readerWebSocketServlet)); server.addServlet(WebSocketReaderServlet.SERVLET_PATH_V2, new ServletHolder(readerWebSocketServlet)); - - final WebSocketServlet pingPongWebSocketServlet = new WebSocketPingPongServlet(webSocketService); - server.addServlet(WebSocketPingPongServlet.SERVLET_PATH, - new ServletHolder(pingPongWebSocketServlet)); - server.addServlet(WebSocketPingPongServlet.SERVLET_PATH_V2, - new ServletHolder(pingPongWebSocketServlet)); } } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java index 1ca8dc93ebf9e..b95bbcab08b11 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java @@ -197,12 +197,20 @@ public void addServlet(String basePath, ServletHolder servletHolder, List> attributes, boolean requireAuthentication) { + addServlet(basePath, servletHolder, attributes, requireAuthentication, true); + } + + private void addServlet(String basePath, ServletHolder servletHolder, + List> attributes, boolean requireAuthentication, boolean checkForExistingPaths) { popularServletParams(servletHolder, config); - Optional existingPath = 
servletPaths.stream().filter(p -> p.startsWith(basePath)).findFirst(); - if (existingPath.isPresent()) { - throw new IllegalArgumentException( - String.format("Cannot add servlet at %s, path %s already exists", basePath, existingPath.get())); + if (checkForExistingPaths) { + Optional existingPath = servletPaths.stream().filter(p -> p.startsWith(basePath)).findFirst(); + if (existingPath.isPresent()) { + throw new IllegalArgumentException( + String.format("Cannot add servlet at %s, path %s already exists", basePath, + existingPath.get())); + } } servletPaths.add(basePath); @@ -231,17 +239,40 @@ private static void popularServletParams(ServletHolder servletHolder, ProxyConfi } } + /** + * Add a REST resource to the servlet context with authentication coverage. + * + * @see WebServer#addRestResource(String, String, Object, Class, boolean) + * + * @param basePath The base path for the resource. + * @param attribute An attribute associated with the resource. + * @param attributeValue The value of the attribute. + * @param resourceClass The class representing the resource. + */ public void addRestResource(String basePath, String attribute, Object attributeValue, Class resourceClass) { + addRestResource(basePath, attribute, attributeValue, resourceClass, true); + } + + /** + * Add a REST resource to the servlet context. + * + * @param basePath The base path for the resource. + * @param attribute An attribute associated with the resource. + * @param attributeValue The value of the attribute. + * @param resourceClass The class representing the resource. + * @param requireAuthentication A boolean indicating whether authentication is required for this resource. + */ + public void addRestResource(String basePath, String attribute, Object attributeValue, + Class resourceClass, boolean requireAuthentication) { ResourceConfig config = new ResourceConfig(); config.register(resourceClass); config.register(JsonMapperProvider.class); ServletHolder servletHolder = new ServletHolder(new ServletContainer(config)); servletHolder.setAsyncSupported(true); - ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); - context.setContextPath(basePath); - context.addServlet(servletHolder, MATCH_ALL); - context.setAttribute(attribute, attributeValue); - handlers.add(context); + // This method has not historically checked for existing paths, so we don't check here either. The + // method call is added to reduce code duplication. 
+ addServlet(basePath, servletHolder, Collections.singletonList(Pair.of(attribute, attributeValue)), + requireAuthentication, false); } public int getExternalServicePort() { diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java index 27e61c90e9e26..3c85b5f2b71cc 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.proxy.stats; +import static java.util.concurrent.TimeUnit.SECONDS; import io.netty.channel.Channel; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; @@ -27,7 +28,10 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.Path; @@ -36,7 +40,12 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response.Status; +import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationParameters; +import org.apache.pulsar.broker.web.AuthenticationFilter; import org.apache.pulsar.proxy.server.ProxyService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @@ -45,12 +54,15 @@ @Produces(MediaType.APPLICATION_JSON) public class ProxyStats { + private static final Logger log = LoggerFactory.getLogger(ProxyStats.class); public static final String ATTRIBUTE_PULSAR_PROXY_NAME = "pulsar-proxy"; private ProxyService service; @Context protected ServletContext servletContext; + @Context + protected HttpServletRequest httpRequest; @GET @Path("/connections") @@ -58,6 +70,7 @@ public class ProxyStats { response = List.class, responseContainer = "List") @ApiResponses(value = { @ApiResponse(code = 503, message = "Proxy service is not initialized") }) public List metrics() { + throwIfNotSuperUser("metrics"); List stats = new ArrayList<>(); proxyService().getClientCnxs().forEach(cnx -> { if (cnx.getDirectProxyHandler() == null) { @@ -78,7 +91,7 @@ public List metrics() { @ApiResponses(value = { @ApiResponse(code = 412, message = "Proxy logging should be > 2 to capture topic stats"), @ApiResponse(code = 503, message = "Proxy service is not initialized") }) public Map topics() { - + throwIfNotSuperUser("topics"); Optional logLevel = proxyService().getConfiguration().getProxyLogLevel(); if (!logLevel.isPresent() || logLevel.get() < 2) { throw new RestException(Status.PRECONDITION_FAILED, "Proxy doesn't have logging level 2"); @@ -92,6 +105,7 @@ public Map topics() { notes = "It only changes log-level in memory, change it config file to persist the change") @ApiResponses(value = { @ApiResponse(code = 412, message = "Proxy log level can be [0-2]"), }) public void updateProxyLogLevel(@PathParam("logLevel") int logLevel) { + throwIfNotSuperUser("updateProxyLogLevel"); if (logLevel < 0 || logLevel > 2) { throw new RestException(Status.PRECONDITION_FAILED, "Proxy log level can be only [0-2]"); } @@ -102,6 +116,7 @@ public void updateProxyLogLevel(@PathParam("logLevel") int logLevel) { @Path("/logging") @ApiOperation(hidden = true, value = "Get proxy logging") public int getProxyLogLevel(@PathParam("logLevel") int logLevel) { + 
throwIfNotSuperUser("getProxyLogLevel"); return proxyService().getProxyLogLevel(); } @@ -114,4 +129,26 @@ protected ProxyService proxyService() { } return service; } + + private void throwIfNotSuperUser(String action) { + if (proxyService().getConfiguration().isAuthorizationEnabled()) { + AuthenticationParameters authParams = AuthenticationParameters.builder() + .clientRole((String) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName)) + .clientAuthenticationDataSource((AuthenticationDataSource) + httpRequest.getAttribute(AuthenticationFilter.AuthenticatedDataAttributeName)) + .build(); + try { + if (authParams.getClientRole() == null + || !proxyService().getAuthorizationService().isSuperUser(authParams).get(30, SECONDS)) { + log.error("Client with role [{}] is not authorized to {}", authParams.getClientRole(), action); + throw new org.apache.pulsar.common.util.RestException(Status.UNAUTHORIZED, + "Client is not authorized to perform operation"); + } + } catch (ExecutionException | TimeoutException | InterruptedException e) { + log.warn("Time-out {} sec while checking the role {} is a super user role ", 30, + authParams.getClientRole()); + throw new org.apache.pulsar.common.util.RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage()); + } + } + } } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java new file mode 100644 index 0000000000000..d6796b7eaa6d2 --- /dev/null +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.proxy.server; + +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; +import org.apache.pulsar.broker.authentication.AuthenticationService; +import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationKeyStoreTls; +import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.pulsar.policies.data.loadbalancer.LoadManagerReport; +import org.apache.pulsar.policies.data.loadbalancer.LoadReport; +import org.eclipse.jetty.servlet.ServletHolder; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +public class AdminProxyHandlerKeystoreTLSTest extends MockedPulsarServiceBaseTest { + + + private final ProxyConfiguration proxyConfig = new ProxyConfiguration(); + + private WebServer webServer; + + private BrokerDiscoveryProvider discoveryProvider; + + private PulsarResources resource; + + @BeforeMethod + @Override + protected void setup() throws Exception { + + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(false); + conf.setWebServicePortTls(Optional.of(0)); + conf.setBrokerServicePortTls(Optional.of(0)); + conf.setTlsEnabledWithKeyStore(true); + conf.setTlsAllowInsecureConnection(false); + conf.setTlsKeyStoreType(KEYSTORE_TYPE); + conf.setTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + conf.setTlsKeyStorePassword(BROKER_KEYSTORE_PW); + conf.setTlsTrustStoreType(KEYSTORE_TYPE); + conf.setTlsTrustStore(CLIENT_TRUSTSTORE_FILE_PATH); + conf.setTlsTrustStorePassword(CLIENT_TRUSTSTORE_PW); + + super.internalSetup(); + + proxyConfig.setWebServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); + proxyConfig.setServicePortTls(Optional.of(0)); + proxyConfig.setWebServicePortTls(Optional.of(0)); + proxyConfig.setTlsEnabledWithBroker(true); + proxyConfig.setTlsEnabledWithKeyStore(true); + + proxyConfig.setTlsKeyStoreType(KEYSTORE_TYPE); + proxyConfig.setTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + proxyConfig.setTlsKeyStorePassword(BROKER_KEYSTORE_PW); + proxyConfig.setTlsTrustStoreType(KEYSTORE_TYPE); + proxyConfig.setTlsTrustStore(CLIENT_TRUSTSTORE_FILE_PATH); + proxyConfig.setTlsTrustStorePassword(CLIENT_TRUSTSTORE_PW); + + proxyConfig.setMetadataStoreUrl(DUMMY_VALUE); + proxyConfig.setConfigurationMetadataStoreUrl(GLOBAL_DUMMY_VALUE); + proxyConfig.setBrokerClientTlsEnabledWithKeyStore(true); + proxyConfig.setBrokerClientTlsKeyStoreType(KEYSTORE_TYPE); + proxyConfig.setBrokerClientTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + proxyConfig.setBrokerClientTlsKeyStorePassword(BROKER_KEYSTORE_PW); + proxyConfig.setBrokerClientTlsTrustStoreType(KEYSTORE_TYPE); + proxyConfig.setBrokerClientTlsTrustStore(BROKER_TRUSTSTORE_FILE_PATH); + proxyConfig.setBrokerClientTlsTrustStorePassword(BROKER_TRUSTSTORE_PW); + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderTls.class.getName()); + proxyConfig.setAuthenticationProviders(providers); + proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationKeyStoreTls.class.getName()); + 
proxyConfig.setBrokerClientAuthenticationParameters(String.format("keyStoreType:%s,keyStorePath:%s,keyStorePassword:%s", + KEYSTORE_TYPE, BROKER_KEYSTORE_FILE_PATH, BROKER_KEYSTORE_PW)); + + resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper), + new ZKMetadataStore(mockZooKeeperGlobal)); + webServer = new WebServer(proxyConfig, new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig))); + discoveryProvider = spy(new BrokerDiscoveryProvider(proxyConfig, resource)); + LoadManagerReport report = new LoadReport(brokerUrl.toString(), brokerUrlTls.toString(), null, null); + doReturn(report).when(discoveryProvider).nextBroker(); + ServletHolder servletHolder = new ServletHolder(new AdminProxyHandler(proxyConfig, discoveryProvider)); + webServer.addServlet("/admin", servletHolder); + webServer.addServlet("/lookup", servletHolder); + webServer.start(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + webServer.stop(); + super.internalCleanup(); + } + + PulsarAdmin getAdminClient() throws Exception { + return PulsarAdmin.builder() + .serviceHttpUrl("https://localhost:" + webServer.getListenPortHTTPS().get()) + .useKeyStoreTls(true) + .allowTlsInsecureConnection(false) + .tlsTrustStorePath(BROKER_TRUSTSTORE_FILE_PATH) + .tlsTrustStorePassword(BROKER_TRUSTSTORE_PW) + .authentication(AuthenticationKeyStoreTls.class.getName(), + String.format("keyStoreType:%s,keyStorePath:%s,keyStorePassword:%s", + KEYSTORE_TYPE, BROKER_KEYSTORE_FILE_PATH, BROKER_KEYSTORE_PW)) + .build(); + } + + @Test + public void testAdmin() throws Exception { + getAdminClient().clusters().createCluster(configClusterName, ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + } + +} diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java index de168284ab47f..af70276aed95e 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java @@ -55,10 +55,6 @@ public class AuthedAdminProxyHandlerTest extends MockedPulsarServiceBaseTest { private BrokerDiscoveryProvider discoveryProvider; private PulsarResources resource; - static String getTlsFile(String name) { - return String.format("./src/test/resources/authentication/tls-admin-proxy/%s.pem", name); - } - @BeforeMethod @Override protected void setup() throws Exception { @@ -67,9 +63,9 @@ protected void setup() throws Exception { conf.setAuthorizationEnabled(true); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(getTlsFile("ca.cert")); - conf.setTlsCertificateFilePath(getTlsFile("broker.cert")); - conf.setTlsKeyFilePath(getTlsFile("broker.key-pk8")); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(false); conf.setSuperUserRoles(ImmutableSet.of("admin")); conf.setProxyRoles(ImmutableSet.of("proxy")); @@ -91,15 +87,15 @@ protected void setup() throws Exception { proxyConfig.setHttpMaxRequestHeaderSize(20000); // enable tls and auth&auth at proxy - proxyConfig.setTlsCertificateFilePath(getTlsFile("broker.cert")); - proxyConfig.setTlsKeyFilePath(getTlsFile("broker.key-pk8")); - 
proxyConfig.setTlsTrustCertsFilePath(getTlsFile("ca.cert")); + proxyConfig.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + proxyConfig.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); + proxyConfig.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); proxyConfig.setBrokerClientAuthenticationParameters( String.format("tlsCertFile:%s,tlsKeyFile:%s", - getTlsFile("proxy.cert"), getTlsFile("proxy.key-pk8"))); - proxyConfig.setBrokerClientTrustCertsFilePath(getTlsFile("ca.cert")); + getTlsFileForClient("proxy.cert"), getTlsFileForClient("proxy.key-pk8"))); + proxyConfig.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); proxyConfig.setAuthenticationProviders(ImmutableSet.of(AuthenticationProviderTls.class.getName())); resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper), @@ -128,22 +124,22 @@ protected void cleanup() throws Exception { PulsarAdmin getDirectToBrokerAdminClient(String user) throws Exception { return PulsarAdmin.builder() .serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(getTlsFile("ca.cert")) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), - ImmutableMap.of("tlsCertFile", getTlsFile(user + ".cert"), - "tlsKeyFile", getTlsFile(user + ".key-pk8"))) + ImmutableMap.of("tlsCertFile", getTlsFileForClient(user + ".cert"), + "tlsKeyFile", getTlsFileForClient(user + ".key-pk8"))) .build(); } PulsarAdmin getAdminClient(String user) throws Exception { return PulsarAdmin.builder() .serviceHttpUrl("https://localhost:" + webServer.getListenPortHTTPS().get()) - .tlsTrustCertsFilePath(getTlsFile("ca.cert")) + .tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .allowTlsInsecureConnection(false) .authentication(AuthenticationTls.class.getName(), - ImmutableMap.of("tlsCertFile", getTlsFile(user + ".cert"), - "tlsKeyFile", getTlsFile(user + ".key-pk8"))) + ImmutableMap.of("tlsCertFile", getTlsFileForClient(user + ".cert"), + "tlsKeyFile", getTlsFileForClient(user + ".key-pk8"))) .build(); } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java index 8229d929ee5e3..9c8e5197adf1a 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java @@ -168,7 +168,7 @@ protected void setup() throws Exception { conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); // Expires after an hour conf.setBrokerClientAuthenticationParameters( - "entityType:broker,expiryTime:" + (System.currentTimeMillis() + 3600 * 1000)); + "entityType:admin,expiryTime:" + (System.currentTimeMillis() + 3600 * 1000)); Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java index 97a73c20b60d0..a9a562e04c899 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java @@ -20,6 +20,8 @@ import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.testng.annotations.Test; 
import java.beans.Introspector; @@ -36,6 +38,8 @@ import java.util.Properties; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; @Test(groups = "broker") public class ProxyConfigurationTest { @@ -134,4 +138,119 @@ public void testConvert() throws IOException { } } + @Test + public void testBrokerUrlCheck() throws IOException { + ProxyConfiguration configuration = new ProxyConfiguration(); + // brokerServiceURL must start with pulsar:// + configuration.setBrokerServiceURL("127.0.0.1:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURL must start with pulsar://"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("brokerServiceURL must start with pulsar://")); + } + } + configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650"); + + // brokerServiceURLTLS must start with pulsar+ssl:// + configuration.setBrokerServiceURLTLS("pulsar://127.0.0.1:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURLTLS must start with pulsar+ssl://"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("brokerServiceURLTLS must start with pulsar+ssl://")); + } + } + + // brokerServiceURL did not support multi urls yet. + configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650,pulsar://127.0.0.2:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650"); + + // brokerServiceURLTLS did not support multi urls yet. + configuration.setBrokerServiceURLTLS("pulsar+ssl://127.0.0.1:6650,pulsar+ssl:127.0.0.2:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerServiceURLTLS("pulsar+ssl://127.0.0.1:6650"); + + // brokerWebServiceURL did not support multi urls yet. 
+ configuration.setBrokerWebServiceURL("http://127.0.0.1:8080,http://127.0.0.2:8080"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerWebServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerWebServiceURL("http://127.0.0.1:8080"); + + // brokerWebServiceURLTLS did not support multi urls yet. + configuration.setBrokerWebServiceURLTLS("https://127.0.0.1:443,https://127.0.0.2:443"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerWebServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerWebServiceURLTLS("https://127.0.0.1:443"); + + // functionWorkerWebServiceURL did not support multi urls yet. + configuration.setFunctionWorkerWebServiceURL("http://127.0.0.1:8080,http://127.0.0.2:8080"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("functionWorkerWebServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setFunctionWorkerWebServiceURL("http://127.0.0.1:8080"); + + // functionWorkerWebServiceURLTLS did not support multi urls yet. 
+ configuration.setFunctionWorkerWebServiceURLTLS("http://127.0.0.1:443,http://127.0.0.2:443"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("functionWorkerWebServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setFunctionWorkerWebServiceURLTLS("http://127.0.0.1:443"); + } + } \ No newline at end of file diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java index 99af3b1cf6abe..b7cfb87474707 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java @@ -53,7 +53,7 @@ protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); - conf.setBrokerClientAuthenticationParameters("authParam:broker"); + conf.setBrokerClientAuthenticationParameters("authParam:admin"); conf.setAuthenticateOriginalAuthData(true); Set superUserRoles = new HashSet(); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java index 4861117ef6ff5..1b63aa14dfe42 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java @@ -20,18 +20,26 @@ import static org.mockito.Mockito.doReturn; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.util.Optional; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.BinaryProtoLookupService; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.metadata.impl.ZKMetadataStore; import org.mockito.Mockito; import org.testng.Assert; @@ -112,4 +120,32 @@ public void testLookup() throws Exception { Assert.assertEquals(LookupProxyHandler.REJECTED_PARTITIONS_METADATA_REQUESTS.get(), 5.0d); } + + @Test + public void testLookupThrottling() throws Exception { + PulsarClientImpl client = (PulsarClientImpl) PulsarClient.builder() + .serviceUrl(proxyService.getServiceUrl()).build(); + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + LookupService lookupService = client.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + ClientCnx 
lookupConnection = client.getCnxPool().getConnection(lookupService.resolveHost()).join(); + + // Make no permits to lookup. + Semaphore lookupSemaphore = proxyService.getLookupRequestSemaphore(); + int availablePermits = lookupSemaphore.availablePermits(); + lookupSemaphore.acquire(availablePermits); + + // Verify will receive too many request exception, and the socket will not be closed. + try { + lookupService.getBroker(TopicName.get(tpName)).get(); + fail("Expected too many request error."); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("Too many")); + } + assertTrue(lookupConnection.ctx().channel().isActive()); + + // cleanup. + lookupSemaphore.release(availablePermits); + client.close(); + } } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java index bde989fc432f9..2f36cc679f1f2 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java @@ -69,6 +69,7 @@ protected void doInitConf() throws Exception { conf.setAdvertisedAddress(null); conf.setAuthenticateOriginalAuthData(true); conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePortTls(Optional.of(0)); conf.setWebServicePort(Optional.of(0)); Set superUserRoles = new HashSet<>(); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java index 2c8c382b6a5ef..3259cfd95c741 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java @@ -144,7 +144,7 @@ protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); - conf.setBrokerClientAuthenticationParameters("authParam:broker"); + conf.setBrokerClientAuthenticationParameters("authParam:admin"); Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java index 4dcfc17096448..a9bead706a373 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java @@ -45,7 +45,7 @@ public class ProxyServiceStarterTest extends MockedPulsarServiceBaseTest { - static final String[] ARGS = new String[]{"-c", "./src/test/resources/proxy.conf"}; + public static final String[] ARGS = new String[]{"-c", "./src/test/resources/proxy.conf"}; protected ProxyServiceStarter serviceStarter; protected String serviceUrl; @@ -76,18 +76,6 @@ private String computeWsBasePath() { return String.format("ws://localhost:%d/ws", serviceStarter.getServer().getListenPortHTTP().get()); } - @Test - public void testEnableWebSocketServer() throws Exception { - HttpClient httpClient = new HttpClient(); - WebSocketClient webSocketClient = new WebSocketClient(httpClient); - webSocketClient.start(); - MyWebSocket myWebSocket = new MyWebSocket(); - String webSocketUri = computeWsBasePath() + "/pingpong"; - Future sessionFuture = 
webSocketClient.connect(myWebSocket, URI.create(webSocketUri)); - System.out.println("uri" + webSocketUri); - sessionFuture.get().getRemote().sendPing(ByteBuffer.wrap("ping".getBytes())); - assertTrue(myWebSocket.getResponse().contains("ping")); - } @Test public void testProducer() throws Exception { diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java index 0bc7c525384df..6247c2a66e874 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java @@ -47,10 +47,6 @@ import static org.testng.Assert.assertTrue; public class ProxyServiceTlsStarterTest extends MockedPulsarServiceBaseTest { - - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_PROXY_CERT_FILE_PATH = "./src/test/resources/authentication/tls/server-cert.pem"; - private final String TLS_PROXY_KEY_FILE_PATH = "./src/test/resources/authentication/tls/server-key.pem"; private ProxyServiceStarter serviceStarter; private String serviceUrl; private int webPort; @@ -63,14 +59,14 @@ protected void setup() throws Exception { serviceStarter.getConfig().setBrokerServiceURL(pulsar.getBrokerServiceUrl()); serviceStarter.getConfig().setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); serviceStarter.getConfig().setBrokerWebServiceURL(pulsar.getWebServiceAddress()); - serviceStarter.getConfig().setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + serviceStarter.getConfig().setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); serviceStarter.getConfig().setServicePort(Optional.empty()); serviceStarter.getConfig().setServicePortTls(Optional.of(0)); serviceStarter.getConfig().setWebServicePort(Optional.of(0)); serviceStarter.getConfig().setTlsEnabledWithBroker(true); serviceStarter.getConfig().setWebSocketServiceEnabled(true); - serviceStarter.getConfig().setTlsCertificateFilePath(TLS_PROXY_CERT_FILE_PATH); - serviceStarter.getConfig().setTlsKeyFilePath(TLS_PROXY_KEY_FILE_PATH); + serviceStarter.getConfig().setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + serviceStarter.getConfig().setTlsKeyFilePath(PROXY_KEY_FILE_PATH); serviceStarter.getConfig().setBrokerProxyAllowedTargetPorts("*"); serviceStarter.start(); serviceUrl = serviceStarter.getProxyService().getServiceUrlTls(); @@ -79,8 +75,9 @@ protected void setup() throws Exception { protected void doInitConf() throws Exception { super.doInitConf(); - this.conf.setTlsCertificateFilePath(TLS_PROXY_CERT_FILE_PATH); - this.conf.setTlsKeyFilePath(TLS_PROXY_KEY_FILE_PATH); + this.conf.setBrokerServicePortTls(Optional.of(0)); + this.conf.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + this.conf.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); } @Override @@ -94,7 +91,7 @@ protected void cleanup() throws Exception { public void testProducer() throws Exception { @Cleanup PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl) - .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(CA_CERT_FILE_PATH) .build(); @Cleanup diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java index 237cf5a48112c..e799e2e948a4a 100644 --- 
a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java @@ -259,6 +259,34 @@ public void testRegexSubscription() throws Exception { } } + @Test(timeOut = 60_000) + public void testRegexSubscriptionWithTopicDiscovery() throws Exception { + @Cleanup + PulsarClient client = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl()).build(); + String subName = "regex-proxy-test-" + System.currentTimeMillis(); + String regexSubscriptionPattern = "persistent://sample/test/local/.*"; + try (Consumer consumer = client.newConsumer() + .topicsPattern(regexSubscriptionPattern) + .subscriptionName(subName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .patternAutoDiscoveryPeriod(10, TimeUnit.MINUTES) + .subscribe()) { + final int topics = 10; + final String topicPrefix = "persistent://sample/test/local/regex-topic-"; + for (int i = 0; i < topics; i++) { + Producer producer = client.newProducer(Schema.BYTES) + .topic(topicPrefix + i) + .create(); + producer.send(("" + i).getBytes(UTF_8)); + producer.close(); + } + for (int i = 0; i < topics; i++) { + Message msg = consumer.receive(); + assertEquals(topicPrefix + new String(msg.getValue(), UTF_8), msg.getTopicName()); + } + } + } + @Test public void testGetSchema() throws Exception { @Cleanup diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java index e1cf62aafa8a3..64b0cd6b1a610 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java @@ -43,10 +43,6 @@ public class ProxyTlsTest extends MockedPulsarServiceBaseTest { - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_PROXY_CERT_FILE_PATH = "./src/test/resources/authentication/tls/server-cert.pem"; - private final String TLS_PROXY_KEY_FILE_PATH = "./src/test/resources/authentication/tls/server-key.pem"; - private ProxyService proxyService; private ProxyConfiguration proxyConfig = new ProxyConfiguration(); @@ -61,8 +57,8 @@ protected void setup() throws Exception { proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); proxyConfig.setTlsEnabledWithBroker(false); - proxyConfig.setTlsCertificateFilePath(TLS_PROXY_CERT_FILE_PATH); - proxyConfig.setTlsKeyFilePath(TLS_PROXY_KEY_FILE_PATH); + proxyConfig.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + proxyConfig.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); proxyConfig.setMetadataStoreUrl(DUMMY_VALUE); proxyConfig.setConfigurationMetadataStoreUrl(GLOBAL_DUMMY_VALUE); @@ -87,7 +83,7 @@ public void testProducer() throws Exception { @Cleanup PulsarClient client = PulsarClient.builder() .serviceUrl(proxyService.getServiceUrlTls()) - .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).build(); + .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); Producer producer = client.newProducer(Schema.BYTES).topic("persistent://sample/test/local/topic").create(); for (int i = 0; i < 10; i++) { @@ -100,7 +96,7 @@ public void testPartitions() throws Exception { @Cleanup PulsarClient client = PulsarClient.builder() .serviceUrl(proxyService.getServiceUrlTls()) - 
.allowTlsInsecureConnection(false).tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).build(); + .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(CA_CERT_FILE_PATH).build(); TenantInfoImpl tenantInfo = createDefaultTenantInfo(); admin.tenants().createTenant("sample", tenantInfo); admin.topics().createPartitionedTopic("persistent://sample/test/local/partitioned-topic", 2); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java index d5b70dfa03756..0f1fa74a20916 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java @@ -24,6 +24,7 @@ import java.io.FileWriter; import java.util.Optional; +import org.apache.pulsar.broker.auth.MockOIDCIdentityProvider; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; @@ -35,24 +36,24 @@ public class ProxyTlsTestWithAuth extends MockedPulsarServiceBaseTest { - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_PROXY_CERT_FILE_PATH = "./src/test/resources/authentication/tls/server-cert.pem"; - private final String TLS_PROXY_KEY_FILE_PATH = "./src/test/resources/authentication/tls/server-key.pem"; - private ProxyService proxyService; private ProxyConfiguration proxyConfig = new ProxyConfiguration(); + private MockOIDCIdentityProvider server; + @Override @BeforeClass protected void setup() throws Exception { internalSetup(); + String clientSecret = "super-secret-client-secret"; + server = new MockOIDCIdentityProvider(clientSecret, "an-audience", 3000); File tempFile = File.createTempFile("oauth2", ".tmp"); tempFile.deleteOnExit(); FileWriter writer = new FileWriter(tempFile); writer.write("{\n" + - " \"client_id\":\"Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x\",\n" + - " \"client_secret\":\"rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb\"\n" + + " \"client_id\":\"my-user\",\n" + + " \"client_secret\":\"" + clientSecret + "\"\n" + "}"); writer.flush(); writer.close(); @@ -63,14 +64,14 @@ protected void setup() throws Exception { proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); proxyConfig.setTlsEnabledWithBroker(true); - proxyConfig.setTlsCertificateFilePath(TLS_PROXY_CERT_FILE_PATH); - proxyConfig.setTlsKeyFilePath(TLS_PROXY_KEY_FILE_PATH); + proxyConfig.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + proxyConfig.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); proxyConfig.setMetadataStoreUrl(DUMMY_VALUE); proxyConfig.setConfigurationMetadataStoreUrl(GLOBAL_DUMMY_VALUE); proxyConfig.setBrokerClientAuthenticationPlugin("org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2"); proxyConfig.setBrokerClientAuthenticationParameters("{\"grant_type\":\"client_credentials\"," + - " \"issuerUrl\":\"https://dev-kt-aa9ne.us.auth0.com\"," + - " \"audience\": \"https://dev-kt-aa9ne.us.auth0.com/api/v2/\"," + + " \"issuerUrl\":\"" + server.getIssuer() + "\"," + + " \"audience\": \"an-audience\"," + " \"privateKey\":\"file://" + tempFile.getAbsolutePath() + "\"}"); proxyService = Mockito.spy(new ProxyService(proxyConfig, new AuthenticationService( @@ -85,8 +86,8 @@ protected void setup() throws Exception { @AfterClass(alwaysRun = 
true) protected void cleanup() throws Exception { internalCleanup(); - proxyService.close(); + server.stop(); } @Test diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java index e8bb128c8c190..2d97a4b06a856 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java @@ -78,6 +78,8 @@ public class ProxyWithAuthorizationNegTest extends ProducerConsumerBase { protected void setup() throws Exception { // enable tls and auth&auth at broker + conf.setTopicLevelPoliciesEnabled(false); + conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java index e912006faa022..88ecfe8a3187b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java @@ -116,6 +116,7 @@ protected void setup() throws Exception { proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); proxyConfig.setBrokerClientAuthenticationParameters(PROXY_TOKEN); proxyConfig.setAuthenticationProviders(providers); + proxyConfig.setStatusFilePath("./src/test/resources/vip_status.html"); AuthenticationService authService = new AuthenticationService(PulsarConfigurationLoader.convertFrom(proxyConfig)); @@ -405,6 +406,29 @@ public void testProxyAuthorizationWithPrefixSubscriptionAuthMode() throws Except log.info("-- Exiting {} test --", methodName); } + @Test + void testGetStatus() throws Exception { + log.info("-- Starting {} test --", methodName); + final PulsarResources resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper), + new ZKMetadataStore(mockZooKeeperGlobal)); + final AuthenticationService authService = new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig)); + final WebServer webServer = new WebServer(proxyConfig, authService); + ProxyServiceStarter.addWebServerHandlers(webServer, proxyConfig, proxyService, + new BrokerDiscoveryProvider(proxyConfig, resource)); + webServer.start(); + @Cleanup + final Client client = javax.ws.rs.client.ClientBuilder + .newClient(new ClientConfig().register(LoggingFeature.class)); + try { + final Response r = client.target(webServer.getServiceUri()).path("/status.html").request().get(); + Assert.assertEquals(r.getStatus(), Response.Status.OK.getStatusCode()); + } finally { + webServer.stop(); + } + log.info("-- Exiting {} test --", methodName); + } + @Test void testGetMetrics() throws Exception { log.info("-- Starting {} test --", methodName); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java index 6a61decfcc693..ec1412b021d7b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java @@ -54,13 +54,6 @@ public class ProxyWithoutServiceDiscoveryTest extends ProducerConsumerBase { private static 
final Logger log = LoggerFactory.getLogger(ProxyWithoutServiceDiscoveryTest.class); - - private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; - private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/server-cert.pem"; - private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/server-key.pem"; - private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; - private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; - private ProxyService proxyService; private ProxyConfiguration proxyConfig = new ProxyConfiguration(); @@ -68,24 +61,32 @@ public class ProxyWithoutServiceDiscoveryTest extends ProducerConsumerBase { @Override protected void setup() throws Exception { + // Disable topic policy + conf.setTopicLevelPoliciesEnabled(false); + // enable tls and auth&auth at broker conf.setAuthenticationEnabled(true); - conf.setAuthorizationEnabled(false); + conf.setAuthorizationEnabled(true); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); - conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - conf.setTlsAllowInsecureConnection(true); + conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); Set superUserRoles = new HashSet<>(); - superUserRoles.add("superUser"); + superUserRoles.add("admin"); + superUserRoles.add("superproxy"); conf.setSuperUserRoles(superUserRoles); + Set proxyRoles = new HashSet<>(); + proxyRoles.add("superproxy"); + conf.setProxyRoles(proxyRoles); + + conf.setBrokerClientTlsEnabled(true); conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); - conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_SERVER_KEY_FILE_PATH); + conf.setBrokerClientAuthenticationParameters(String.format("tlsCertFile:%s,tlsKeyFile:%s", + getTlsFileForClient("admin.cert"), getTlsFileForClient("admin.key-pk8"))); Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); @@ -110,14 +111,14 @@ protected void setup() throws Exception { proxyConfig.setTlsEnabledWithBroker(true); // enable tls and auth&auth at proxy - proxyConfig.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); - proxyConfig.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); - proxyConfig.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + proxyConfig.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); + proxyConfig.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); + proxyConfig.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); - proxyConfig.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); - proxyConfig.setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + proxyConfig.setBrokerClientAuthenticationParameters(String.format("tlsCertFile:%s,tlsKeyFile:%s", + getTlsFileForClient("superproxy.cert"), getTlsFileForClient("superproxy.key-pk8"))); + proxyConfig.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); proxyConfig.setAuthenticationProviders(providers); @@ -137,7 +138,7 @@ protected void cleanup() throws 
Exception { /** *
    -     * It verifies e2e tls + Authentication + Authorization (client -> proxy -> broker>
    +     * It verifies e2e tls + Authentication + Authorization (client -> proxy -> broker)
          *
          * 1. client connects to proxy over tls and pass auth-data
          * 2. proxy authenticate client and retrieve client-role
    @@ -154,8 +155,8 @@ public void testDiscoveryService() throws Exception {
             log.info("-- Starting {} test --", methodName);
     
             Map authParams = Maps.newHashMap();
    -        authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
    -        authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
    +        authParams.put("tlsCertFile", getTlsFileForClient("admin.cert"));
    +        authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8"));
             Authentication authTls = new AuthenticationTls();
             authTls.configure(authParams);
             // create a client which connects to proxy over tls and pass authData
    @@ -198,10 +199,10 @@ public void testDiscoveryService() throws Exception {
         }
     
         protected final PulsarClient createPulsarClient(Authentication auth, String lookupUrl) throws Exception {
    -        admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()).tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH)
    -                .allowTlsInsecureConnection(true).authentication(auth).build());
    +        admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()).tlsTrustCertsFilePath(CA_CERT_FILE_PATH)
    +                .authentication(auth).build());
             return PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS)
    -                .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true).authentication(auth)
    +                .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).authentication(auth)
                     .enableTls(true).build();
         }
     
    diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java
    index 5b7cb88f98280..d3291c8fb910d 100644
    --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java
    +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java
    @@ -51,10 +51,6 @@ public class SuperUserAuthedAdminProxyHandlerTest extends MockedPulsarServiceBas
         private BrokerDiscoveryProvider discoveryProvider;
         private PulsarResources resource;
     
    -    static String getTlsFile(String name) {
    -        return String.format("./src/test/resources/authentication/tls-admin-proxy/%s.pem", name);
    -    }
    -
         @BeforeMethod
         @Override
         protected void setup() throws Exception {
    @@ -64,9 +60,9 @@ protected void setup() throws Exception {
     
             conf.setBrokerServicePortTls(Optional.of(0));
             conf.setWebServicePortTls(Optional.of(0));
    -        conf.setTlsTrustCertsFilePath(getTlsFile("ca.cert"));
    -        conf.setTlsCertificateFilePath(getTlsFile("broker.cert"));
    -        conf.setTlsKeyFilePath(getTlsFile("broker.key-pk8"));
    +        conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH);
    +        conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH);
    +        conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH);
             conf.setTlsAllowInsecureConnection(false);
             conf.setSuperUserRoles(ImmutableSet.of("admin", "superproxy"));
             conf.setProxyRoles(ImmutableSet.of("superproxy"));
    @@ -86,15 +82,15 @@ protected void setup() throws Exception {
             proxyConfig.setTlsEnabledWithBroker(true);
     
             // enable tls and auth&auth at proxy
    -        proxyConfig.setTlsCertificateFilePath(getTlsFile("broker.cert"));
    -        proxyConfig.setTlsKeyFilePath(getTlsFile("broker.key-pk8"));
    -        proxyConfig.setTlsTrustCertsFilePath(getTlsFile("ca.cert"));
    +        proxyConfig.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH);
    +        proxyConfig.setTlsKeyFilePath(BROKER_KEY_FILE_PATH);
    +        proxyConfig.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH);
     
             proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName());
             proxyConfig.setBrokerClientAuthenticationParameters(
                     String.format("tlsCertFile:%s,tlsKeyFile:%s",
    -                              getTlsFile("superproxy.cert"), getTlsFile("superproxy.key-pk8")));
    -        proxyConfig.setBrokerClientTrustCertsFilePath(getTlsFile("ca.cert"));
    +                              getTlsFileForClient("superproxy.cert"), getTlsFileForClient("superproxy.key-pk8")));
    +        proxyConfig.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH);
             proxyConfig.setAuthenticationProviders(ImmutableSet.of(AuthenticationProviderTls.class.getName()));
     
             resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper),
    @@ -123,11 +119,11 @@ protected void cleanup() throws Exception {
         PulsarAdmin getAdminClient(String user) throws Exception {
             return PulsarAdmin.builder()
                 .serviceHttpUrl("https://localhost:" + webServer.getListenPortHTTPS().get())
    -            .tlsTrustCertsFilePath(getTlsFile("ca.cert"))
    +            .tlsTrustCertsFilePath(CA_CERT_FILE_PATH)
                 .allowTlsInsecureConnection(false)
                 .authentication(AuthenticationTls.class.getName(),
    -                    ImmutableMap.of("tlsCertFile", getTlsFile(user + ".cert"),
    -                                    "tlsKeyFile", getTlsFile(user + ".key-pk8")))
    +                    ImmutableMap.of("tlsCertFile", getTlsFileForClient(user + ".cert"),
    +                                    "tlsKeyFile", getTlsFileForClient(user + ".key-pk8")))
                 .build();
         }
     
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.cert.pem
    deleted file mode 100644
    index 0665edbdc126c..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.cert.pem
    +++ /dev/null
    @@ -1,26 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIEZTCCAk2gAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyMjA4NTcwNloYDzIyOTIwNDA2MDg1NzA2WjAQMQ4wDAYDVQQD
    -DAVhZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJw3Jfbn0xkW
    -36kqQjES6Hn+YTZ2jXS5Co2MzsGBsIY0qJ2BbWHSSaMrja4IERUaCQp16SWxPmZ0
    -srMm6ErDoap+O70CWXLT3ybYMV5aVwv3ca4uxsedzaw9MpFXfUDsJJ3yre1JpO+t
    -A/QzJEGq1d6NN49InUP5kB1Rpay3vaxx8hduzqTO+E/Lptv92p6GjOpXi2icSjiA
    -pgaan2ldGGKEKv2Sc2bfdIDkTq1yDyNmuPET0yD2dci106EW/mPyj81umPKG/o4K
    -5W18yG/IhXw5W1zlgO1fWCuqva8NCBdu7s1c7hUX8DBx7km4/I7dllz/nYHIfCEQ
    -Dmj38oQjYk8CAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF
    -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp
    -ZmljYXRlMB0GA1UdDgQWBBQTMzAAOJ9gXvQSS7Be3+qmrb1kVDAfBgNVHSMEGDAW
    -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw
    -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQAwy8f6hsG0
    -85e3SOIztbUnaVxS7wzDeDzR3vCjpXpm4vTToYzN9zx3JHKSdJrB12emVxItwW/7
    -bXqBk0n2EdQjRHCuebnY05eFMNGagMEEMVmSLOproQD7VsyALNxCss1JAyRikh70
    -W7wgOVeAhqE53UqqrkzTE7Q+8Bag9t3FytHxApY17XglbWkiVcFpQwSURe9Emi3E
    -aCJCryGJXrBNuCFXGzetSygDEy27+2FeH8S2XsMwUEGLqDDehzvMenVz1xjXtq+s
    -KPkofAde52NHd4lLkSeBMSFnKe3V7Xxax2OEUsoQRF3bkbpcJSWsKS9ZAA2yrtuy
    -Nz/aA1F42LuSFPAYQr1kcZ8eSS918RWz+BiJYU2JuUOPd1XUmJXVvZ4CJurWaC7+
    -ZD51YdD8E245xd55fsA6/qLx3eE/Kp0dVq+Hxuz6b4yLET0zkGunOe4A3hnRgkOA
    -XolXCL+VthhWtFGXn8CjpxDnzjahq69Io+dINehqd5aJEgvnHZIK2s7FTqqBBodU
    -HhyAE94f64z7ziuRhEG54bmBF+MoGyPf6dVn1Mp3+o+YeQ5q6XlKgh+u8jMgmqRO
    -ikdsVdMqopt/FXh9eFzvQrwOZFLK6JE/edUgb6xvS1jMF5zi2lIlIkq1RBQOr4HT
    -XDwX4vRtfpDxpsetGVpeq9O07fbMvp+mkw==
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.key-pk8.pem
    deleted file mode 100644
    index 6aaa22c44d746..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/admin.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCcNyX259MZFt+p
    -KkIxEuh5/mE2do10uQqNjM7BgbCGNKidgW1h0kmjK42uCBEVGgkKdeklsT5mdLKz
    -JuhKw6Gqfju9Ally098m2DFeWlcL93GuLsbHnc2sPTKRV31A7CSd8q3tSaTvrQP0
    -MyRBqtXejTePSJ1D+ZAdUaWst72scfIXbs6kzvhPy6bb/dqehozqV4tonEo4gKYG
    -mp9pXRhihCr9knNm33SA5E6tcg8jZrjxE9Mg9nXItdOhFv5j8o/Nbpjyhv6OCuVt
    -fMhvyIV8OVtc5YDtX1grqr2vDQgXbu7NXO4VF/Awce5JuPyO3ZZc/52ByHwhEA5o
    -9/KEI2JPAgMBAAECggEAP+Ipq2Q4puz8wGBgu1LhMWp+9Nfcl1xI3YQ01VulBe0o
    -+2h/g96McKcSBJaV7cw84ENB+kEWpK2amrsRiemhBmkjIvOAAv50Jp2I6u4E5Qbn
    -PXUxo1Z8UrCgKmHd/hvUCafByuUwBzf5AvebHyOu3JlhnD302mSHtAW8u/pUHd3D
    -sxJaw1zwQvmlD5ryM2IVYSji7NYCXF0H6V7HfyohTCrQFEWEAdqEDcFR1BUwbPCE
    -raq7sAiEy+cBUnfV3IOEAffOZy0vSR90/WwERcwrzCdZmpWpTqtbcqtdBPqsSQzX
    -shDvXd0e43+FSJzCtQsSQ8WzIrp3rgKJUDA1pJQW4QKBgQDJdTcB6qZz8r+4q9gc
    -q1KAJyMy01Vio1yaqYzXr0C9Z5FW1GhL+4fwer2y9JyD45sb/lP4reFj9S193BNR
    -C8cdxM5GrWEpzaQ0Dt1s8P1UbU8G6r4NqwI6ORF3CxXZKfXivQcgqBurJGrBdjIC
    -NwqAzSkX5flBbhfTlJuUH87k3wKBgQDGgjzdIWXab7FZfdUzzrtEwNooBiSEFixm
    -UAwP5sxL8VkM9wzAKPEVDDQBBoKIga9OESif4S9UUo4tu65AYfxF9Om26K4QrVj8
    -HT/U+lfT7xFmPd/GINIbeTSmW0w7Ehpj8SbcQI4Sb2lVE562FlHh7QbHZd0/X/2J
    -nbgT9MRAkQKBgQCVPAN3o/+SPOzRPFtnQXJoBJYKfIrv+twKpjbzP5vRsvrzO33X
    -a4kUF5iXDKU0/lJUtl42BXjFt0Xvyit1CiiCYNv9d0pW0UMmXSyiGxNOi3rTQOlw
    -7pFD2Cqb6NZSfMbtI+I3ytBUQzHiBlCdW3CoYVJjpbSzR37W+WsWm0mEOQKBgQCq
    -ANObFYUjA1DBMZCrY7rhcL/kUw5myI6RuK/71k7UIwd+oP0cfHOq8N6AmlCkE1xM
    -4UkHU1SzRFhbNkZPARuJ1etqJ+8afTqd/3axMQyShkVCaG8CQQ1vVegPKFUqqaBM
    -QzRioC6L/zoYEEt16buKXvHVRpmqMszxVE9XV+HS4QKBgDvs5qloOowS5kcWInrj
    -yecu5MJvFf2IZpMw7EiKV8VUPeKaiUlqgFj9d9cotUIMauXgBq6f5NBRg7Ike60t
    -/JJrPtqXY+gdFLjxcKMUVYhomFlQYYg/RJZUBrtkyKBP68abopCYmb59r3ixeNNf
    -qA1F36mmFtzdjSdtH/dTTecN
    ------END PRIVATE KEY-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.cert.pem
    deleted file mode 100644
    index b5c7a5dc709a1..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.cert.pem
    +++ /dev/null
    @@ -1,27 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIEkDCCAnigAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyMjA4NTUzMloYDzIyOTIwNDA2MDg1NTMyWjAjMSEwHwYDVQQD
    -DBhicm9rZXIucHVsc2FyLmFwYWNoZS5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IB
    -DwAwggEKAoIBAQDQouKhZah4hMCqmg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63
    -EvNjEK4L/ZWSEV45L/wc6YV14RmM6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0O
    -OQXVicqZLOc6igZQhRg8ANDYdTJUTF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01
    -+ARO9OotMJtBY+vIU5bV6JydfgkhQH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+X
    -aqTN3/tF8+MBcFB3G04s1qc2CJPJM3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00s
    -bxa4FGbKgfDobbkJ+GgblWCkAcLN95sKTqtHAgMBAAGjgd0wgdowCQYDVR0TBAIw
    -ADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2Vu
    -ZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUaxFvJrkEGqk8azTA
    -DyVyTyTbJAIwQQYDVR0jBDowOIAUVwvpyyPov0c+UHo/RX6hGEOdFSehFaQTMBEx
    -DzANBgNVBAMMBmZvb2JhcoIJANfih0+geeIMMA4GA1UdDwEB/wQEAwIFoDATBgNV
    -HSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAgEA35QDGclHzQtHs3yQ
    -ZzNOSKisg5srTiIoQgRzfHrXfkthNFCnBzhKjBxqk3EIasVtvyGuk0ThneC1ai3y
    -ZK3BivnMZfm1SfyvieFoqWetsxohWfcpOSVkpvO37P6v/NmmaTIGkBN3gxKCx0QN
    -zqApLQyNTM++X3wxetYH/afAGUrRmBGWZuJheQpB9yZ+FB6BRp8YuYIYBzANJyW9
    -spvXW03TpqX2AIoRBoGMLzK72vbhAbLWiCIfEYREhbZVRkP+yvD338cWrILlOEur
    -x/n8L/FTmbf7mXzHg4xaQ3zg/5+0OCPMDPUBE4xWDBAbZ82hgOcTqfVjwoPgo2V0
    -fbbx6redq44J3Vn5d9Xhi59fkpqEjHpX4xebr5iMikZsNTJMeLh0h3uf7DstuO9d
    -mfnF5j+yDXCKb9XzCsTSvGCN+spmUh6RfSrbkw8/LrRvBUpKVEM0GfKSnaFpOaSS
    -efM4UEi72FRjszzHEkdvpiLhYvihINLJmDXszhc3fCi42be/DGmUhuhTZWynOPmp
    -0N0V/8/sGT5gh4fGEtGzS/8xEvZwO9uDlccJiG8Pi+aO0/K9urB9nppd/xKWXv3C
    -cib/QrW0Qow4TADWC1fnGYCpFzzaZ2esPL2MvzOYXnW4/AbEqmb6Weatluai64ZK
    -3N2cGJWRyvpvvmbP2hKCa4eLgEc=
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.key-pk8.pem
    deleted file mode 100644
    index 2b51d015b8ace..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/broker.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDQouKhZah4hMCq
    -mg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63EvNjEK4L/ZWSEV45L/wc6YV14RmM
    -6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0OOQXVicqZLOc6igZQhRg8ANDYdTJU
    -TF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01+ARO9OotMJtBY+vIU5bV6Jydfgkh
    -QH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+XaqTN3/tF8+MBcFB3G04s1qc2CJPJ
    -M3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00sbxa4FGbKgfDobbkJ+GgblWCkAcLN
    -95sKTqtHAgMBAAECggEBALE1eMtfnk3nbAI74bih84D7C0Ug14p8jJv/qqBnsx4j
    -WrgbWDMVrJa7Rym2FQHBMMfgIwKnso0iSeJvaPz683j1lk833YKe0VQOPgD1m0IN
    -wV1J6mQ3OOZcKDIcerY1IBHqSmBEzR7dxIbnaxlCAX9gb0hdBK6zCwA5TMG5OQ5Y
    -3cGOmevK5i2PiejhpruA8h7E48P1ATaGHUZif9YD724oi6AcilQ8H/DlOjZTvlmK
    -r4aJ30f72NwGM8Ecet5CE2wyflAGtY0k+nChYkPRfy54u64Z/T9B53AvneFaj8jv
    -yFepZgRTs2cWhEl0KQGuBHQ4+IeOfMt2LebhvjWW8YkCgYEA7BXVsnqPHKRDd8wP
    -eNkolY4Fjdq4wu9ad+DaFiZcJuv7ugr+Kplltq6e4aU36zEdBYdPp/6KM/HGE/Xj
    -bo0CELNUKs/Ny9H/UJc8DDbVEmoF3XGiIbKKq1T8NTXTETFnwrGkBFD8nl7YTsOF
    -M4FZmSok0MhhkpEULAqxBS6YpQsCgYEA4jxM1egTVSWjTreg2UdYo2507jKa7maP
    -PRtoPsNJzWNbOpfj26l3/8pd6oYKWck6se6RxIUxUrk3ywhNJIIOvWEC7TaOH1c9
    -T4NQNcweqBW9+A1x5gyzT14gDaBfl45gs82vI+kcpVv/w2N3HZOQZX3yAUqWpfw2
    -yw1uQDXtgDUCgYEAiYPWbBXTkp1j5z3nrT7g0uxc89n5USLWkYlZvxktCEbg4+dP
    -UUT06EoipdD1F3wOKZA9p98uZT9pX2sUxOpBz7SFTEKq3xQ9IZZWFc9CoW08aVat
    -V++FsnLYTa5CeXtLsy6CGTmLTDx2xrpAtlWb+QmBVFPD8fmrxFOd9STFKS0CgYAt
    -6ztVN3OlFqyc75yQPXD6SxMkvdTAisSMDKIOCylRrNb5f5baIP2gR3zkeyxiqPtm
    -3htsHfSy67EtXpP50wQW4Dft2eLi7ZweJXMEWFfomfEjBeeWYAGNHHe5DFIauuVZ
    -2WexDEGqNpAlIm0s7aSjVPrn1DHbouOkNyenlMqN+QKBgQDVYVhk9widShSnCmUA
    -G30moXDgj3eRqCf5T7NEr9GXD1QBD/rQSPh5agnDV7IYLpV7/wkYLI7l9x7mDwu+
    -I9mRXkyAmTVEctLTdXQHt0jdJa5SfUaVEDUzQbr0fUjkmythTvqZ809+d3ELPeLI
    -5qJ7jxgksHWji4lYfL4r4J6Zaw==
    ------END PRIVATE KEY-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/ca.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/ca.cert.pem
    deleted file mode 100644
    index 0446700135d39..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/ca.cert.pem
    +++ /dev/null
    @@ -1,29 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIFCDCCAvCgAwIBAgIJANfih0+geeIMMA0GCSqGSIb3DQEBCwUAMBExDzANBgNV
    -BAMMBmZvb2JhcjAeFw0xODA2MjIwODQ2MjFaFw0zODA2MTcwODQ2MjFaMBExDzAN
    -BgNVBAMMBmZvb2JhcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOVU
    -UpTPeXCeyfUiQS824l9s9krZd4R6TA4D97eQ9EWm2D7ppV4gPApHO8j5f+joo/b6
    -Iso4aFlHpJ8VV2a5Ol7rjQw43MJHaBgwDxB1XWgsNdfoI7ebtp/BWg2nM3r8wm+Z
    -gKenf9d1/1Ol+6yFUehkLkIXUvldiVegmmje8FnwhcDNE1eTrh66XqSJXEXqgBKu
    -NqsoYcVak72OyOO1/N8CESoSdyBkbSiH5vJyo0AUCjn7tULga7fxojmqBZDog9Pg
    -e5Fi/hbCrdinbxBrMgIxQ7wqXw2sw6iOWu4FU8Ih/CuF4xaQy2YP7MEk4Ff0LCY0
    -KMhFMWU7550r/fz/C2l7fKhREyCQPa/bVE+dfxgZ/gCZ+p7vQ154hCCjpd+5bECv
    -SN1bcVIPG6ngQu4vMXa7QRBi/Od40jSVGVJXYY6kXvrYatad7035w2GGGGkvMsQm
    -y53yh4tqQfH7ulHqB0J5LebTQRp6nRizWigVCLjNkxJYI+Dj51qvT1zdyWEegKr1
    -CthBfYzXlfjeH3xri1f0UABeC12n24Wkacd9af7zs7S3rYntEK444w/3fB0F62Lh
    -SESfMLAmUH0dF5plRShrFUXz23nUeS8EYgWmnGkpf/HDzB67vdfAK0tfJEtmmY78
    -q06OSgMr+AOOqaomh4Ez2ZQG592bS71G8MrE7r2/AgMBAAGjYzBhMB0GA1UdDgQW
    -BBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAfBgNVHSMEGDAWgBRXC+nLI+i/Rz5Qej9F
    -fqEYQ50VJzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
    -9w0BAQsFAAOCAgEAYd2PxdV+YOaWcmMG1fK7CGwSzDOGsgC7hi4gWPiNsVbz6fwQ
    -m5Ac7Zw76dzin8gzOPKST7B8WIoc7ZWrMnyh3G6A3u29Ec8iWahqGa91NPA3bOIl
    -0ldXnXfa416+JL/Q5utpiV6W2XDaB53v9GqpMk4rOTS9kCFOiuH5ZU8P69jp9mq6
    -7pI/+hWFr+21ibmXH6ANxRLd/5+AqojRUYowAu2997Z+xmbpwx/2Svciq3LNY/Vz
    -s9DudUHCBHj/DPgNxsEUt8QNohjQkRbFTY0a1aXodJ/pm0Ehk2kf9KwYYYduR7ak
    -6UmPIPrZg6FePNahxwMZ0RtgX7EXmpiiIH1q9BsulddWkrFQclevsWO3ONQVrDs2
    -gwY0HQuCRCJ+xgS2cyGiGohW5MkIsg1aI0i0j5GIUSppCIYgirAGCairARbCjhcx
    -pbMe8RTuBhCqO3R2wZ0wXu7P7/ArI/Ltm1dU6IeHUAUmeneVj5ie0SdA19mHTS2o
    -lG77N0jy6eq2zyEwJE6tuS/tyP1xrxdzXCYY7f6X9aNfsuPVQTcnrFajvDv8R6uD
    -YnRStVCdS6fZEP0JzsLrqp9bgLIRRsiqsVVBCgJdK1I/X59qk2EyCLXWSgk8T9XZ
    -iux8LlPpskt30YYt1KhlWB9zVz7k0uYAwits5foU6RfCRDPAyOa1q/QOXk0=
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.cert.pem
    deleted file mode 100644
    index 6c2f4295c9dbd..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.cert.pem
    +++ /dev/null
    @@ -1,26 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIEZTCCAk2gAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyNTEzNTYwNFoYDzIyOTIwNDA5MTM1NjA0WjAQMQ4wDAYDVQQD
    -DAVwcm94eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMieX78Yj8OW
    -eBHAneRaCCoO8Qrpj8zGo7h9lCdmi1lBDh1uR2sDbotiHGfJzQn836WcYmyeAvfn
    -qvgr9HCXmXdLgmJ3GT/LVu5GEm6msSDiZQPr9so5lQVioisK4UwJROQsE/J52cyR
    -9o3H6M4FKb6QpoobKa62fSfTumwwulaYaDJuRRGoGIkcRuUQ59EWAaDkD3IcDpAn
    -9mTbnE4Iz+JxSrsZ5DJ3X/m/AqyLWtj6GAfyK9a1dhNdlf2x4JZT1QNtojiBXt95
    -OIZyRBNbHMFniq5gel6wdBkmJWutfcTct7wKa2LCxLpKoDIc1HWoL3+RUzOKxYIP
    -0qXEQ3bmONkCAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF
    -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp
    -ZmljYXRlMB0GA1UdDgQWBBSgsgfmDbXEkrrpHUC9GnDDjxaKizAfBgNVHSMEGDAW
    -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw
    -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQB+uZ2OWR+G
    -sRqYHEeVqUI8G2p8Np8eC/onGpBL8Gj9SGxIQcIPtqHPUlCe9fd/96JOptOGRYEB
    -BmUCaCmQ4IgMW6e6fArka5IB4XXIgHFXyQ6ImTvjDavzlVw06zn9S4dLwVzsRBg+
    -GS9svtq23W+f5rEN5N+7LhtcbclfiG4VCCqDG5VhkzEok+SRamDI8rDRZtodMw0O
    -/+L+xaaQPUPjX8KUlKn4uVpCDbxUzHonlCPzbkHHm5su0D4ysjJIy3/y3yow6JE/
    -02L7PZkmkmw3/V+84T3X8/GD15sVUv/3v1gXEBxYwAs+RNTJ0APvMEMSvCq0AMfF
    -bPMZBuAGNBG7lv7TovzHgGFKXT7du5OFF/qjAsEffhbo224CB96fgwvvndwHHBFh
    -J06BvHZG1i9dDVhUKoB1owkWrE4RZv2ZKEtZYgizzSmzZRHtARo0t1Byc5djx1tX
    -TkJOHshNqJZOY1ER0DPaVQgKI+PRTbEdj/xPGRX3ebSqDmilAfPXshqgElfch6Yl
    -f2V58TyCnjXOibvkG9D5OyCdWLEECumOZgYar0KZgNfrvTOi1OKXnX1fsbh29fWA
    -ICZRcdmjkz79zQXY2SuzCWlskuXPKAmW1AMqs+l6ormmKfzIUx3Yriy4LqIIYY1v
    -uQD5vghZmd9HUg2KaXfSGD9stGCD8KhntA==
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.key-pk8.pem
    deleted file mode 100644
    index 70e0107f2741f..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/proxy.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDInl+/GI/DlngR
    -wJ3kWggqDvEK6Y/MxqO4fZQnZotZQQ4dbkdrA26LYhxnyc0J/N+lnGJsngL356r4
    -K/Rwl5l3S4Jidxk/y1buRhJuprEg4mUD6/bKOZUFYqIrCuFMCUTkLBPyednMkfaN
    -x+jOBSm+kKaKGymutn0n07psMLpWmGgybkURqBiJHEblEOfRFgGg5A9yHA6QJ/Zk
    -25xOCM/icUq7GeQyd1/5vwKsi1rY+hgH8ivWtXYTXZX9seCWU9UDbaI4gV7feTiG
    -ckQTWxzBZ4quYHpesHQZJiVrrX3E3Le8CmtiwsS6SqAyHNR1qC9/kVMzisWCD9Kl
    -xEN25jjZAgMBAAECggEAJJyCbKVW1yLGlrbIGbw0cTh41Lz6+SvnBOwl9WrJU2iD
    -4usVLXpa2iT1ehthx8jWJ6r6a0gK0qL8mH2tBj8kSpkFGmMRwIqjOqifBIJ3IMEw
    -Hh8Z0p3fjDQL1D8QDohCgkFpAn8qOCMLE6S/35khnR1Yxytd1/yFqpcBFm1uFA85
    -dYisjPqWm/IZIU5rH0zgKAIhtvl9abnoi93443EHsKpRAW1gwRXx9Aak7TV768bZ
    -tELBsaTnXnNzamDiaimmxEOlqR9O0W8JE/31KFL26JcVmsTRG7sMpoUxCEMjOuGZ
    -J30bXFZUW6NrDpFsQ7uTqD6TNn2971N8KFCLnC/JYQKBgQDo82axC7L7n7BYTu18
    -dupeT7n5dTBD/I3l0KtT05xiZA8GZr2i+pt+/aWzCzK4/Ee4jb4/o8CRQRB5v5mo
    -c9lc+BaoAIQiwiw+aufT+UojrcijrOMEL5Zk3zdZ2rcEoAsVvqtejNnwLCGI9Rnl
    -gp7n9oRhwDIv9Fu09snUougE3wKBgQDceAGKUB8pGd3eEya/0jU9J60LsKbcJSsN
    -4v1S5LiPtHOyhr0g4x/LibMP2PJhG3tJ1bgpaGmn9du2D20M6ukRhIYyn/7G+N+A
    -oqryyvO1MMYnhc4IEQvWrzDnBM0hV2bdjp4s/1ASVHVRwk8+orqxysIJ1D75nnRX
    -Tyfl6HgBRwKBgQCfVlWIhiMPv6OkU6BXgRNAHTJs8f5okmgQqNF3jgequRwZ2c6e
    -muIfU6myNNel9lGsZ6+Y4g4GjMWTMT4OHeewkrUUhv3atIwEyaT2tc5DZ0wUwF2r
    -cE1jg9bdbB/BVyMd5YRcMOWlRNpPTq8+8EB3E4RrREZPzMmplyBohGFFawKBgGjc
    -P0dM8nU3E1rj2wNTdPUAYQL1Y3fDyeWR+BEsLkhTeNAJ2/y/akkB1oQMGMRtMMee
    -ejhfrBkyC+1dCu4g8PffA4EirihvCMcDF7HhK+cbKrRzpNobWXkj3GuU0ggwrQFm
    -Kv+V87y0JRTdCZnuBkQ3/vBz3fwWDJnWUVC9sA5TAoGBAN4t2gJCZax5yInUgyWi
    -Tgumb2qVWsGBBLMTKIsrkK/KphzgAhHcVhDCybA79TmIM2FfIlpf6TE7Mv4NI055
    -ZJzHX+GMT5Czy2Ku9MJD3PLTFN5MjYb6g92fKViLDMI6fwzTHB8xPJ7Ob9bV2srS
    -ZlmKNXTkZFk3/orK4WdCZufz
    ------END PRIVATE KEY-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.cert.pem
    deleted file mode 100644
    index 01b69eacc32cb..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.cert.pem
    +++ /dev/null
    @@ -1,19 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIDCDCCAfCgAwIBAgIJAMPzPm3QygzRMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV
    -BAMMBXJhbmRvMCAXDTE4MDYyNzA5MDA0OVoYDzIyOTIwNDExMDkwMDQ5WjAQMQ4w
    -DAYDVQQDDAVyYW5kbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALKR
    -nN1y/aBrhbNM6kDfBmWTaKFuANEZ+VST++AVImssN+FyOL2ukxuT74JW0b9g9scl
    -aTYzUBbKuTrMIf77RPIJY6R5E6JWn12UhDpnLa9eYGOkyPyoSp5hdyJj3spElWkl
    -eGRY8g9zsWJO6QDpTL4g2VAtWTjFIVI3l3enNgNp3hJnJkdvxyBuRN7Szg9hGPmQ
    -Tp9rsKkZQD4SJ9/WMAlqSqeB6rSn20rydMJCnI92TT8hlv1wZcVXSVrJypzZb27t
    -aAi5o9GEtag3aXalR8zL9DiUk64338A0fYgf2/GSjomZehcg17h8tElYMFV6hJqS
    -95gM18nfwldttbihjKMCAwEAAaNjMGEwHQYDVR0OBBYEFAzmiWmqQIB10sSoSAzb
    -claQvBPtMB8GA1UdIwQYMBaAFAzmiWmqQIB10sSoSAzbclaQvBPtMA8GA1UdEwEB
    -/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4IBAQCN7nup
    -rGd4sVuP2vvcRbhGo1JCWDS4Uwe7nbSR/4QTAlyUYK8KIwTvfxeEeO4yBGmiDzOE
    -+W2y0i22qVnShNCBZ50Evm+FkIEcwlnLtAJt6zIrE4cWdqW1a6CMp9MJkONjACOx
    -3Y560SlqQuoKa41PtXUnk8tFT8Asz7b3NtEfmrBlZJpSSM5exBOCH/Enwzac890u
    -zJuweTz4aO8aefunkPMKlBPliJ6EkEYclrjJqxi+4VrEMtAIRxwKXPvnHnXESaF7
    -yUE4qrjQa4B+Bn1DWqfLcHJCVDxqYrXiEAN/YtX0UOqxZvGd49eNIs2vQkET0+55
    -J3hW25OLSnPjAO/j
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.key-pk8.pem
    deleted file mode 100644
    index 86a7fa387a8c7..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/randouser.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCykZzdcv2ga4Wz
    -TOpA3wZlk2ihbgDRGflUk/vgFSJrLDfhcji9rpMbk++CVtG/YPbHJWk2M1AWyrk6
    -zCH++0TyCWOkeROiVp9dlIQ6Zy2vXmBjpMj8qEqeYXciY97KRJVpJXhkWPIPc7Fi
    -TukA6Uy+INlQLVk4xSFSN5d3pzYDad4SZyZHb8cgbkTe0s4PYRj5kE6fa7CpGUA+
    -Eiff1jAJakqngeq0p9tK8nTCQpyPdk0/IZb9cGXFV0laycqc2W9u7WgIuaPRhLWo
    -N2l2pUfMy/Q4lJOuN9/ANH2IH9vxko6JmXoXINe4fLRJWDBVeoSakveYDNfJ38JX
    -bbW4oYyjAgMBAAECggEAf8cSqKQQOSq3kYYIWkM9IJJK3LkKfJZJg+wg4Eg/SNFr
    -azeAwrqZKbLCQFI/5OJNtFNg5hfxx11pDlnkOcEzpL5zPs4k7pVtlFkiBWivmD3A
    -W40fBSynuI2l4kX0tmg9QfA+JhA/pi7zT5WHxc8ryyFWX7kTjzwAjASbrlNIo0d7
    -e5SMGcZUeH7m/+pCw626nhpexAJ5Fm9/uc0Zwh4xrzS6j2xoqhOqvQb175V/Iv6/
    -8nBq/VjE5LqoP57gqw/Cs4zwwBFayr/WaitGVikga2eZI3q/GFMtez27cdMxEZhI
    -9ZP1y8kzlzs1QPjdJQUxIE+/zIm3w8IL4iO0SQE5AQKBgQDmSYazZBkmixUzvBhg
    -8tGTWJOAG0NhgBamkbaR6s1L8ou9A5MGbFbRBbc5zRmiyMC2Q4D6Xu0ut2N93tUr
    -DkxZa35dt3HWMRmNByjHtpMzapYnmDpuF4k7dy+81GkUbd2ZDYadnknjiaFaDZ7j
    -9JxUENIwzzLhFMx7emBQ1+3zewKBgQDGgcewYthAsCTAcBtGKw6G39jAYCpr2N7X
    -/B5W5n4VXVHKq35wK1bHRnHj7dkm4sH572XB1tPoMrnQ7j9i+XSkc1Ixo0iHr9H0
    -QauVQKQqzkvI5cSaWS5N6ZRk6GOGxI0SwYcNNdqJzEEJuYHCTUpff4d6utMgpTZ6
    -SQJw8u0O+QKBgE2VEcNYAr0geDkgslnfFEn+ulqbVL0BSSA+0PIh154xjXBVRvAQ
    -CcOLmGnptixIU9xTq50t49wsPmGGc+x4ebJaa40pIznU+tWvRsbZtIfK7eFTAMRc
    -O4iEI9oK+Ye/Z7uLegGZ9SyqDmjnU9NaclxD+nwlIfAAcM9csBwsUucHAoGABjJ7
    -B3iug6Z8Hz3gvBoQBAns/GSELoXAv0FxuQjNGuGk8gzUj6/qr6H1YEZGpz4hDCp7
    -JMgOKYub3XfypqZfC9tFz6LnWsUUaum575jrByMVnpn9v0vVdD08ksHmiYiNVu6P
    -xsvNnMuxpBoUgPpkvgJ/Okem27gMsViiKOCMohECgYEAt3MwlVILN0t6RPBCpTKB
    -nrh6cxx8kQ2TI1UP44WciBkGVvIQOEbl+1955lt/LAECzooiBJOkKubcPK22sJPS
    -MZauEwdAqRQj/uUs3oRISUjo0H/IKGI3O23jhIAI2XC/zkQTE6dKjhXxJIl+5a8f
    -v9VhnIlgSfaxu3R0l7SvQlo=
    ------END PRIVATE KEY-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.cert.pem
    deleted file mode 100644
    index 9656e2c8ba0d2..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.cert.pem
    +++ /dev/null
    @@ -1,26 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIEajCCAlKgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyNjEzMjUxN1oYDzIyOTIwNDEwMTMyNTE3WjAVMRMwEQYDVQQD
    -DApzdXBlcnByb3h5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA43Sn
    -ys/h3wxJ/IBEzbJdQAKCxU4of5KgTDzFoOaS8C63Nwbjgy0qcWYdRDceP4lJIKSA
    -1+JZ+R1opyrfVIC2D9oDJFIJTFfXy9G9VYDccwAONPgAamvRzBnYcEu8fqM+Ohle
    -kZltKktAHnX3WtG3RyxEL5nYzlMlGwUXJu3Rxc9SlkYSxERzjWHikGqGmXLX2qB0
    -k6oyxTrK+4+EHk3khbEIqyQZSOFbD7NMfnRy0CWFv/9T1shgjAIBCake6jY7lwaT
    -S7JvbLfG6ABf5xHMxoWLXa2qwb+Ar43Ff9g8kKZwFOxMeGcwkzyJPBbWytFxaWn+
    -R2RHhTaVCGc22CjdfQIDAQABo4HFMIHCMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEB
    -BAQDAgWgMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBDbGllbnQg
    -Q2VydGlmaWNhdGUwHQYDVR0OBBYEFHEgLhfH0Z1QlLxeQbG7YZyBlz1MMB8GA1Ud
    -IwQYMBaAFFcL6csj6L9HPlB6P0V+oRhDnRUnMA4GA1UdDwEB/wQEAwIF4DAdBgNV
    -HSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwDQYJKoZIhvcNAQELBQADggIBAK9q
    -3YnEa99Cq3Vo3g6PE/A/xOE97seVNuavqyBcVv9PrTydb5XG4jPkYU3xOYXIc8rA
    -A4gzd+AsGO9rjGMPGGjQJI3JO/3BCeBJMkn50C/rM1yWVMHnVyFcJlg16xaWtj1e
    -2Jk8egJW2gSYyF+N2TdzI7tOb002GNr36posnqO+IOoLapyHFBxxUjsPDRoo8fJn
    -myWsV1Y9oRUZyJlfIAJsu85ew7gDBY2jaiEiopzour3uU3C0N7gYni2OmVwfr6J8
    -R2/Jp43BSD5sYOW9RAJIEEXef+InYtz9HTJvKu2LsWwIBkaztk29tJcDE+1La6Sw
    -0dF0YkUwnXoGQFjiV+8pXX3TF5glXKj1rU8WfNazF6lqslB6DmdgR3/FQ6Z2sE86
    -d9hVtayZIGlzU0rWmBBtr++7Wo88nmzAtd/xbZMFG8U//+Q2AvJT2oVGtqM48+al
    -rnsN/gYrLDr7RC14bHIuO1v6ZL/rAi7SPKrKYAyQVTAcRuW516SxxR6S1Xa1ITnh
    -rwgKg13eQuwu3iigguIS9XL6nAXabBIxBxMl6o2YlyIPekKYIcQmpqhkavJ6VOgX
    -iq9VdY6fIJVfmxNZuwM3/28k7UeUAfhI2SSVH4ZURbPiGGH2wukc1QkmtZ2cNa35
    -C1y79aqJbIa3ErqLFPj/fM+34x8L7QHPq6RfaODa
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.key-pk8.pem
    deleted file mode 100644
    index 2e4140b8fb392..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/superproxy.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDjdKfKz+HfDEn8
    -gETNsl1AAoLFTih/kqBMPMWg5pLwLrc3BuODLSpxZh1ENx4/iUkgpIDX4ln5HWin
    -Kt9UgLYP2gMkUglMV9fL0b1VgNxzAA40+ABqa9HMGdhwS7x+oz46GV6RmW0qS0Ae
    -dfda0bdHLEQvmdjOUyUbBRcm7dHFz1KWRhLERHONYeKQaoaZctfaoHSTqjLFOsr7
    -j4QeTeSFsQirJBlI4VsPs0x+dHLQJYW//1PWyGCMAgEJqR7qNjuXBpNLsm9st8bo
    -AF/nEczGhYtdrarBv4CvjcV/2DyQpnAU7Ex4ZzCTPIk8FtbK0XFpaf5HZEeFNpUI
    -ZzbYKN19AgMBAAECggEBAM1JAwuD1drmj3wKFI8F1S2pVndXFCwXnP9RthiDIckO
    -kKNkX0CMKgtQ20cu6+jyMgL5FaRCkWvJxCNkCU6OIENsQ3urYuL5QTWeZeBevhg4
    -y5m43z8tcptgFD09zbEKCmaLcRO9wo3yfrs/QvE/58efxyajFs8YsZuSW5Px/msl
    -AFN8qGY0q0lQbQPK8cPYT4OnW9isqEjkvHq1Q+ryv4cxsGWtBZYmJVeLVHzClihi
    -lODdN6YsDcu9jLwsA8o2WSBRsbLHK6caW4FdEwgOLckc0EGGEU64VEB9ZOwgcwYQ
    -M9mZDs2w7qk63YnDH4KmpoP0Oc8N+bG7Pkifd7f8HqECgYEA8esMQya6Qln4O7Qo
    -aI8X9t5gCpq+bh/P+7vYdBR/9St5VtE/P0yuaQdT2e5t+Ei4ujqHvJ0GTPmqYIJf
    -2DNvJMu8EnXv+nqKQCsyMc/nqQN1CcRGqZssH1V/ZIQLRUA0cN4hzE0eHITwp6U9
    -vD/WaE3mKX/XHthIjc8hnuoajOkCgYEA8LIYMgyD3wClWOKe5TkBumI0KvA9tP90
    -IFHu1wLNl0tKBpsXkzzxiav1FMX3K7B29vxukrs946KRESHzRg4c5ULPnMmwcQyx
    -AortKJWrGVsna/QDWPRutXSN1XmnjKWsHmTpQQqLfaRvLW+Hd34+EVdVh9sVt/y9
    -RucnBvxcX3UCgYBebm/U7pMaP2BkfcigN+sU1G0M9qaK+iQHkaXGehIQs62js/5K
    -STZzjQawNR/8IPbqytodR/YjqflVvs6G6FzkMhrx4dORJLA+qB3pz8wP72eKLnGe
    -1xF8EbWumNSFbbCKtkrfIuM0IriF2Dym9QxOnsnPPTXNtoNrx4TKMXu3sQKBgBq9
    -noSI8WmoF7adTsvmnnOHj4YptKFUNCGXGLLYg+DII4xCVMct4SPLb+oD6Gb5Lu5X
    -sy0oEkMk/3roy68/yCQMXSZtHeYhY9UFfD2jCyRBBUswC+MpHNeaAFv0LRIqIcoq
    -qeNo+YBW8WcZ2fIDm3+vtTfntiz/rkOfUK2tAdI1AoGAL2LVDF5e1meSRocEoy8e
    -gqrshrhg+KBqYmcjtd4Iup4WvR6uyH4qE9yFLLHFZ04pZzXLHcPfqgOSP8qTxgH3
    -h0uqtcYmejS/yl6PbC9OPXcSkMgntRkTbU9Ug05ijfN9NRnW+8WXvi8Z6DKed1P1
    -9PxwA75P9o0h00mMk8PQitQ=
    ------END PRIVATE KEY-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.cert.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.cert.pem
    deleted file mode 100644
    index 072f2867fe62b..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.cert.pem
    +++ /dev/null
    @@ -1,26 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIEZTCCAk2gAwIBAgICEAUwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyNzA4NDAyNVoYDzIyOTIwNDExMDg0MDI1WjAQMQ4wDAYDVQQD
    -DAV1c2VyMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM5iqgr4PUUZ
    -AW9MDGP5cBaSJALSPV63m6M/IoovrMWJ9CGtcQfZTUHwDorIlXgQ6H/KufmsHW0Y
    -OQbChLSTDB14D0jSMtyv6+ibSoE1ZEl2SbB1miLd0P5AS9YmzzEW2+bx0zJORLYD
    -PzJ1Nh3/kQlRs04IECki291WZiVRzX2JRoL7kMtOAoKJqQfsT14Oi9EAw39VhLeB
    -uc/Mx6Jsutq/YdXakoZtQbfZka2MMfLXgMDLIPDqbU+09q7au2dq8RjGzrWnxnOX
    -o/XQssrIbwzJJYASBsgAAtnAw7bPzCX6+cL6PZVRyiEZov0HKXyRyvrbQ5hyEMuS
    -3dHqoKt0fKMCAwEAAaOBxTCBwjAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIF
    -oDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRp
    -ZmljYXRlMB0GA1UdDgQWBBQ7NSD6lx6Vq38cEoD5l7FHs1Ej8DAfBgNVHSMEGDAW
    -gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJzAOBgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYw
    -FAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0GCSqGSIb3DQEBCwUAA4ICAQC+cU7ctY7o
    -eTW+oHTq9EGJUMwW1fOww4QDrHtgZT4OYkO88zxQV2Cr050p8eaV5dHXZBf9/bRV
    -7hPNV5+HpQhb9TZ9xK2WRZ2QV7a/UDyUnksVKGSK9tNZMZPueOEB19e4bIBcgnQa
    -5i9sgZr93na7pFOY7lBQy6gfaOcnejYHmvVqIGaZBVH8rkEsGhhkJxy7qkpFNKgf
    -PGiIRo9L0WYqDCSiaICeCiteJwIfjsUFJKF0YnpXZq1kFfQscnleg60MWZAXvacp
    -tAciE51Ow60cqQWER66iwqnBSPD4l91SxAaGQAmalgCioGsYSbojXcOvRidhYJ2T
    -3YwCpqlC0qC9D2ZmNoukb1a0Pi03MuSJwD/8v9eqwEW9dFAzdnWDzTZMN9CfdjVh
    -2qiO5o5Si/X1Dmjdk2F/EM62YJQBAlkZBetFJ0o2QPGTSD+zrpfITIW8Pu+/5zcC
    -MZdzyUf0p1GO2Kn7wmqPQjz59zABagmxCNks8HeqPnzmWuADMaggb0nOmrBACE2x
    -b9XR6/xaXpwTRf0h5N3evivzUHo6XVw8A3gVUNoBm9Of3PlAsjM4I4SWFb6nrwYv
    -RnI04c+R95Su1fMc2wky0PmW+iWRTaEN/cUdX1SF6jo1nRLELGcbMSGUI6UI8kff
    -crvCz7uLu7Lr5/CKnEm2bSCZ4eIQpOs4nQ==
    ------END CERTIFICATE-----
    diff --git a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.key-pk8.pem b/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.key-pk8.pem
    deleted file mode 100644
    index eec5c94c0665c..0000000000000
    --- a/pulsar-proxy/src/test/resources/authentication/tls-admin-proxy/user1.key-pk8.pem
    +++ /dev/null
    @@ -1,28 +0,0 @@
    ------BEGIN PRIVATE KEY-----
    -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOYqoK+D1FGQFv
    -TAxj+XAWkiQC0j1et5ujPyKKL6zFifQhrXEH2U1B8A6KyJV4EOh/yrn5rB1tGDkG
    -woS0kwwdeA9I0jLcr+vom0qBNWRJdkmwdZoi3dD+QEvWJs8xFtvm8dMyTkS2Az8y
    -dTYd/5EJUbNOCBApItvdVmYlUc19iUaC+5DLTgKCiakH7E9eDovRAMN/VYS3gbnP
    -zMeibLrav2HV2pKGbUG32ZGtjDHy14DAyyDw6m1PtPau2rtnavEYxs61p8Zzl6P1
    -0LLKyG8MySWAEgbIAALZwMO2z8wl+vnC+j2VUcohGaL9Byl8kcr620OYchDLkt3R
    -6qCrdHyjAgMBAAECggEBAK8J8QvytAxJg/T/+7ZC1PTfp1kZNGGDuaV/o2ytuIul
    -T//MGQQ+IY8d6Ud9jX9SX84agxalCiP/mkYIbgK0gF7x94yccfTH433ZTxw8yzye
    -7SqS41JU7K7mmysaqTkKGSFK0gNlbFMud8f0rxxMJ5dOypMQtZwd63lSkLlwIqcn
    -YhKcAdXM4WGv07MFdwAXvrK2trhBBkCA08ZzyVy4lWE+cVfEkwH6O8EdGchl7g4U
    -LqIkYzufQWKdH9frt6N3qEV/qs0s1qipVzV07GaPpEdK/G5exJ7NjZaXJkHOuMbt
    -7ae1zJBexW41/++fjzsWSYljvSEphjqwrGYrr+tMGNECgYEA+Sg1sHcYO4Bg16qG
    -c8XM9g8DFL396X4MnMh+AFrDV8dbdz39x9fFtpKEfqaAZrG6I8fW3RkMLsJvJ4GI
    -KDJNz2ezEMbNKlXE9UzvAsrdvWNWDGJvrDu0CbJg2wtBeVEBIUYetSdNHoEwY7ZJ
    -QHnwbxYYrUFIJ34rI0SQ17/Z4vsCgYEA1A27jYTWr86JmvjQDUiJcJsbhpa/6OzZ
    -n3KTu0n0oCvl7uvvjUfhlzmcq7kyY0oO1C5TQHLiqBaI5hXw+iYwnESLIn7BwhMX
    -dcxglvXx95h1NqHYX9GnURl2QtrjmuY5yx+itfmZCRkRPO3c5e9uZyf01CeE4uwl
    -hDNh0RrSXHkCgYBvlQhmXQ+nJhk4vI+2LXFbCOISWfvqo562YDu9oOg22Xsm7cZH
    -x2QuHXPk3GBInXOFLqwVHHCOSFlLUgFOLykVp5VUABRFz1+Dk86+a2fetywEI9lr
    -QtmgNhiWQHY0BIkDA8ogytcIwEaRgUNQ8sswlK68eK39sc1T4BMV7D+CHQKBgG3g
    -+8lWBwSsKgOCYBQx/P27caTo4mJosE99yG0o4jhI5ulJmiSEFbINqVAWM7TdQBfU
    -NVFU9nuQybknr2l/dnrSzaG/Otk8mVBx6a7vnETm2/3GGV91PJS6c9wqnfu6xkGp
    -j99piVH8ikEfI/KFgZi0TJnOLH6FTN9W3J3EnzJJAoGAE4ZLLi+2RP4ZYXI11CJC
    -BSM1AEpqemgTUSidZyTiJJMGGrC7tyEba+4TIqeGgvD74p57XFbN7LOJWmnAiK8b
    -BLhQqPgOCXqrna00GnNKKx7AzgYHqlq03tGlX858aH18rkSRVk3UhkTkGTSr3iQn
    -aJeN/0HbpGr67bimf4bqlCY=
    ------END PRIVATE KEY-----
    diff --git a/pulsar-sql/pom.xml b/pulsar-sql/pom.xml
    index 1f8972bf22219..5e589018b7e8e 100644
    --- a/pulsar-sql/pom.xml
    +++ b/pulsar-sql/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    4.0.0
    -    pom
    -    
    -        org.apache.pulsar
    -        pulsar
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-sql
    -    Pulsar SQL :: Parent
    -
    -    
    -        
    -        3.14.9
    -        
    -        1.17.2
    -        213
    -    
    -
    -    
    -        
    -            
    -            
    -                com.squareup.okhttp3
    -                okhttp
    -                ${okhttp3.version}
    -            
    -            
    -                com.squareup.okhttp3
    -                okhttp-urlconnection
    -                ${okhttp3.version}
    -            
    -            
    -                com.squareup.okhttp3
    -                logging-interceptor
    -                ${okhttp3.version}
    -            
    -            
    -                com.squareup.okio
    -                okio
    -                ${okio.version}
    -            
    -
    -            
    -            
    -                org.jline
    -                jline-reader
    -                ${jline3.version}
    -            
    -            
    -                org.jline
    -                jline-terminal
    -                ${jline3.version}
    -            
    -            
    -                org.jline
    -                jline-terminal-jna
    -                ${jline3.version}
    -            
    -
    -            
    -            
    -                org.slf4j
    -                log4j-over-slf4j
    -                ${slf4j.version}
    -            
    -            
    -                org.slf4j
    -                slf4j-jdk14
    -                ${slf4j.version}
    -            
    -
    -            
    -                io.airlift
    -                bom
    -                ${airlift.version}
    -                pom
    -                import
    -            
    -        
    -    
    -
    -
    -    
    -        
    -            
    -                org.apache.maven.plugins
    -                maven-checkstyle-plugin
    -                
    -                    
    -                        checkstyle
    -                        verify
    -                        
    -                            check
    -                        
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            main
    -            
    -                
    -                    disableSqlMainProfile
    -                    
    -                    !true
    -                
    -            
    -            
    -                presto-pulsar
    -                presto-pulsar-plugin
    -                presto-distribution
    -            
    -        
    -        
    -            pulsar-sql-tests
    -            
    -                presto-pulsar
    -                presto-pulsar-plugin
    -                presto-distribution
    -            
    -        
    -        
    +    3.14.9
    +    
    +    1.17.2
    +    213
    +  
    +  
    +    
    +      
    +      
    +        com.squareup.okhttp3
    +        okhttp
    +        ${okhttp3.version}
    +      
    +      
    +        com.squareup.okhttp3
    +        okhttp-urlconnection
    +        ${okhttp3.version}
    +      
    +      
    +        com.squareup.okhttp3
    +        logging-interceptor
    +        ${okhttp3.version}
    +      
    +      
    +        com.squareup.okio
    +        okio
    +        ${okio.version}
    +      
    +      
    +      
    +        org.jline
    +        jline-reader
    +        ${jline3.version}
    +      
    +      
    +        org.jline
    +        jline-terminal
    +        ${jline3.version}
    +      
    +      
    +        org.jline
    +        jline-terminal-jna
    +        ${jline3.version}
    +      
    +      
    +      
    +        org.slf4j
    +        log4j-over-slf4j
    +        ${slf4j.version}
    +      
    +      
    +        org.slf4j
    +        slf4j-jdk14
    +        ${slf4j.version}
    +      
    +      
    +        io.airlift
    +        bom
    +        ${airlift.version}
    +        pom
    +        import
    +      
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-checkstyle-plugin
    +        
    +          
    +            checkstyle
    +            verify
    +            
    +              check
    +            
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      main
    +      
    +        
    +          disableSqlMainProfile
    +          
    +          !true
    +        
    +      
    +      
    +        presto-pulsar
    +        presto-pulsar-plugin
    +        presto-distribution
    +      
    +    
    +    
    +      pulsar-sql-tests
    +      
    +        presto-pulsar
    +        presto-pulsar-plugin
    +        presto-distribution
    +      
    +    
    +    
    -        
    -            owasp-dependency-check
    -            
    -                
    -                    
    -                        org.owasp
    -                        dependency-check-maven
    -                        ${dependency-check-maven.version}
    -                        
    -                            
    -                                
    -                                    aggregate
    -                                
    -                                none
    -                            
    -                        
    -                    
    -                
    -            
    -        
    -    
    -
    +    
    +      owasp-dependency-check
    +      
    +        
    +          
    +            org.owasp
    +            dependency-check-maven
    +            ${dependency-check-maven.version}
    +            
    +              
    +                
    +                  aggregate
    +                
    +                none
    +              
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/pulsar-sql/presto-distribution/LICENSE b/pulsar-sql/presto-distribution/LICENSE
    index 214a928a74941..9e1beb639f4c9 100644
    --- a/pulsar-sql/presto-distribution/LICENSE
    +++ b/pulsar-sql/presto-distribution/LICENSE
    @@ -207,96 +207,100 @@ This projects includes binary packages with the following licenses:
     The Apache Software License, Version 2.0
     
       * Jackson
    -    - jackson-annotations-2.13.4.jar
    -    - jackson-core-2.13.4.jar
    -    - jackson-databind-2.13.4.2.jar
    -    - jackson-dataformat-smile-2.13.4.jar
    -    - jackson-datatype-guava-2.13.4.jar
    -    - jackson-datatype-jdk8-2.13.4.jar
    -    - jackson-datatype-joda-2.13.4.jar
    -    - jackson-datatype-jsr310-2.13.4.jar
    -    - jackson-dataformat-yaml-2.13.4.jar
    -    - jackson-jaxrs-base-2.13.4.jar
    -    - jackson-jaxrs-json-provider-2.13.4.jar
    -    - jackson-module-jaxb-annotations-2.13.4.jar
    -    - jackson-module-jsonSchema-2.13.4.jar
    +    - jackson-annotations-2.14.2.jar
    +    - jackson-core-2.14.2.jar
    +    - jackson-databind-2.14.2.jar
    +    - jackson-dataformat-smile-2.14.2.jar
    +    - jackson-datatype-guava-2.14.2.jar
    +    - jackson-datatype-jdk8-2.14.2.jar
    +    - jackson-datatype-joda-2.14.2.jar
    +    - jackson-datatype-jsr310-2.14.2.jar
    +    - jackson-dataformat-yaml-2.14.2.jar
    +    - jackson-jaxrs-base-2.14.2.jar
    +    - jackson-jaxrs-json-provider-2.14.2.jar
    +    - jackson-module-jaxb-annotations-2.14.2.jar
    +    - jackson-module-jsonSchema-2.14.2.jar
      * Guava
    -    - guava-31.0.1-jre.jar
    +    - guava-32.1.1-jre.jar
         - listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar
         - failureaccess-1.0.1.jar
      * Google Guice
         - guice-5.1.0.jar
      * Apache Commons
         - commons-math3-3.6.1.jar
    -    - commons-compress-1.21.jar
    +    - commons-compress-1.26.0.jar
         - commons-lang3-3.11.jar
      * Netty
    -    - netty-buffer-4.1.89.Final.jar
    -    - netty-codec-4.1.89.Final.jar
    -    - netty-codec-dns-4.1.89.Final.jar
    -    - netty-codec-http-4.1.89.Final.jar
    -    - netty-codec-haproxy-4.1.89.Final.jar
    -    - netty-codec-socks-4.1.89.Final.jar
    -    - netty-handler-proxy-4.1.89.Final.jar
    -    - netty-common-4.1.89.Final.jar
    -    - netty-handler-4.1.89.Final.jar
    +    - netty-buffer-4.1.100.Final.jar
    +    - netty-codec-4.1.100.Final.jar
    +    - netty-codec-dns-4.1.100.Final.jar
    +    - netty-codec-http-4.1.100.Final.jar
    +    - netty-codec-haproxy-4.1.100.Final.jar
    +    - netty-codec-socks-4.1.100.Final.jar
    +    - netty-handler-proxy-4.1.100.Final.jar
    +    - netty-common-4.1.100.Final.jar
    +    - netty-handler-4.1.100.Final.jar
         - netty-reactive-streams-2.0.6.jar
    -    - netty-resolver-4.1.89.Final.jar
    -    - netty-resolver-dns-4.1.89.Final.jar
    -    - netty-resolver-dns-classes-macos-4.1.89.Final.jar
    -    - netty-resolver-dns-native-macos-4.1.89.Final-osx-aarch_64.jar
    -    - netty-resolver-dns-native-macos-4.1.89.Final-osx-x86_64.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final-linux-aarch_64.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final-linux-x86_64.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final-osx-aarch_64.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final-osx-x86_64.jar
    -    - netty-tcnative-boringssl-static-2.0.56.Final-windows-x86_64.jar
    -    - netty-tcnative-classes-2.0.56.Final.jar
    -    - netty-transport-4.1.89.Final.jar
    -    - netty-transport-classes-epoll-4.1.89.Final.jar
    -    - netty-transport-native-epoll-4.1.89.Final-linux-x86_64.jar
    -    - netty-transport-native-unix-common-4.1.89.Final.jar
    -    - netty-transport-native-unix-common-4.1.89.Final-linux-x86_64.jar
    -    - netty-codec-http2-4.1.89.Final.jar
    -    - netty-incubator-transport-classes-io_uring-0.0.18.Final.jar
    -    - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-x86_64.jar
    -    - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-aarch_64.jar
    +    - netty-resolver-4.1.100.Final.jar
    +    - netty-resolver-dns-4.1.100.Final.jar
    +    - netty-resolver-dns-classes-macos-4.1.100.Final.jar
    +    - netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar
    +    - netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar
    +    - netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar
    +    - netty-tcnative-classes-2.0.61.Final.jar
    +    - netty-transport-4.1.100.Final.jar
    +    - netty-transport-classes-epoll-4.1.100.Final.jar
    +    - netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar
    +    - netty-transport-native-unix-common-4.1.100.Final.jar
    +    - netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar
    +    - netty-codec-http2-4.1.100.Final.jar
    +    - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar
    +    - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar
    +    - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar
      * GRPC
    -    - grpc-api-1.45.1.jar
    -    - grpc-context-1.45.1.jar
    -    - grpc-core-1.45.1.jar
    -    - grpc-grpclb-1.45.1.jar
    -    - grpc-netty-1.45.1.jar
    -    - grpc-protobuf-1.45.1.jar
    -    - grpc-protobuf-lite-1.45.1.jar
    -    - grpc-stub-1.45.1.jar
    +    - grpc-api-1.55.3.jar
    +    - grpc-context-1.55.3.jar
    +    - grpc-core-1.55.3.jar
    +    - grpc-grpclb-1.55.3.jar
    +    - grpc-netty-1.55.3.jar
    +    - grpc-protobuf-1.55.3.jar
    +    - grpc-protobuf-lite-1.55.3.jar
    +    - grpc-stub-1.55.3.jar
       * JEtcd
    -    - jetcd-common-0.5.11.jar
    -    - jetcd-core-0.5.11.jar
    -
    +    - jetcd-api-0.7.5.jar
    +    - jetcd-common-0.7.5.jar
    +    - jetcd-core-0.7.5.jar
    +    - jetcd-grpc-0.7.5.jar
    +  * Vertx
    +    - vertx-core-4.3.8.jar
    +    - vertx-grpc-4.3.5.jar
      * Joda Time
         - joda-time-2.10.10.jar
         - failsafe-2.4.4.jar
       * Jetty
    -    - http2-client-9.4.48.v20220622.jar
    -    - http2-common-9.4.48.v20220622.jar
    -    - http2-hpack-9.4.48.v20220622.jar
    -    - http2-http-client-transport-9.4.48.v20220622.jar
    -    - jetty-alpn-client-9.4.48.v20220622.jar
    -    - http2-server-9.4.48.v20220622.jar
    -    - jetty-alpn-java-client-9.4.48.v20220622.jar
    -    - jetty-client-9.4.48.v20220622.jar
    -    - jetty-http-9.4.48.v20220622.jar
    -    - jetty-io-9.4.48.v20220622.jar
    -    - jetty-jmx-9.4.48.v20220622.jar
    -    - jetty-security-9.4.48.v20220622.jar
    -    - jetty-server-9.4.48.v20220622.jar
    -    - jetty-servlet-9.4.48.v20220622.jar
    -    - jetty-util-9.4.48.v20220622.jar
    -    - jetty-util-ajax-9.4.48.v20220622.jar
    +    - http2-client-9.4.53.v20231009.jar
    +    - http2-common-9.4.53.v20231009.jar
    +    - http2-hpack-9.4.53.v20231009.jar
    +    - http2-http-client-transport-9.4.53.v20231009.jar
    +    - jetty-alpn-client-9.4.53.v20231009.jar
    +    - http2-server-9.4.53.v20231009.jar
    +    - jetty-alpn-java-client-9.4.53.v20231009.jar
    +    - jetty-client-9.4.53.v20231009.jar
    +    - jetty-http-9.4.53.v20231009.jar
    +    - jetty-io-9.4.53.v20231009.jar
    +    - jetty-jmx-9.4.53.v20231009.jar
    +    - jetty-security-9.4.53.v20231009.jar
    +    - jetty-server-9.4.53.v20231009.jar
    +    - jetty-servlet-9.4.53.v20231009.jar
    +    - jetty-util-9.4.53.v20231009.jar
    +    - jetty-util-ajax-9.4.53.v20231009.jar
       * Byte Buddy
    -    - byte-buddy-1.11.13.jar
    +    - byte-buddy-1.14.12.jar
       * Apache BVal
         - bval-jsr-2.0.5.jar
       * Bytecode
    @@ -368,8 +372,8 @@ The Apache Software License, Version 2.0
       * OpenCSV
         - opencsv-2.3.jar
       * Avro
    -    - avro-1.10.2.jar
    -    - avro-protobuf-1.10.2.jar
    +    - avro-1.11.3.jar
    +    - avro-protobuf-1.11.3.jar
       * Caffeine
         - caffeine-2.9.1.jar
       * Javax
    @@ -401,7 +405,7 @@ The Apache Software License, Version 2.0
       * RocksDB JNI
         - rocksdbjni-7.9.2.jar
       * SnakeYAML
    -    - snakeyaml-1.32.jar
    +    - snakeyaml-2.0.jar
       * Bean Validation API
         - validation-api-2.0.1.Final.jar
       * Objectsize
    @@ -426,21 +430,21 @@ The Apache Software License, Version 2.0
         - async-http-client-2.12.1.jar
         - async-http-client-netty-utils-2.12.1.jar
       * Apache Bookkeeper
    -    - bookkeeper-common-4.16.0.jar
    -    - bookkeeper-common-allocator-4.16.0.jar
    -    - bookkeeper-proto-4.16.0.jar
    -    - bookkeeper-server-4.16.0.jar
    -    - bookkeeper-stats-api-4.16.0.jar
    -    - bookkeeper-tools-framework-4.16.0.jar
    -    - circe-checksum-4.16.0.jar
    -    - codahale-metrics-provider-4.16.0.jar
    -    - cpu-affinity-4.16.0.jar
    -    - http-server-4.16.0.jar
    -    - prometheus-metrics-provider-4.16.0.jar
    -    - codahale-metrics-provider-4.16.0.jar
    -    - bookkeeper-slogger-api-4.16.0.jar
    -    - bookkeeper-slogger-slf4j-4.16.0.jar
    -    - native-io-4.16.0.jar
    +    - bookkeeper-common-4.16.4.jar
    +    - bookkeeper-common-allocator-4.16.4.jar
    +    - bookkeeper-proto-4.16.4.jar
    +    - bookkeeper-server-4.16.4.jar
    +    - bookkeeper-stats-api-4.16.4.jar
    +    - bookkeeper-tools-framework-4.16.4.jar
    +    - circe-checksum-4.16.4.jar
    +    - codahale-metrics-provider-4.16.4.jar
    +    - cpu-affinity-4.16.4.jar
    +    - http-server-4.16.4.jar
    +    - prometheus-metrics-provider-4.16.4.jar
    +    - codahale-metrics-provider-4.16.4.jar
    +    - bookkeeper-slogger-api-4.16.4.jar
    +    - bookkeeper-slogger-slf4j-4.16.4.jar
    +    - native-io-4.16.4.jar
       * Apache Commons
         - commons-cli-1.5.0.jar
         - commons-codec-1.15.jar
    @@ -454,9 +458,9 @@ The Apache Software License, Version 2.0
       * JSON Simple
         - json-simple-1.1.1.jar
       * Snappy
    -    - snappy-java-1.1.8.4.jar
    +    - snappy-java-1.1.10.5.jar
       * Jackson
    -    - jackson-module-parameter-names-2.13.4.jar
    +    - jackson-module-parameter-names-2.14.2.jar
       * Java Assist
         - javassist-3.25.0-GA.jar
       * Java Native Access
    @@ -468,18 +472,14 @@ The Apache Software License, Version 2.0
         - memory-0.8.3.jar
         - sketches-core-0.8.3.jar
       * Apache Zookeeper
    -    - zookeeper-3.8.1.jar
    -    - zookeeper-jute-3.8.1.jar
    +    - zookeeper-3.9.1.jar
    +    - zookeeper-jute-3.9.1.jar
       * Apache Yetus Audience Annotations
         - audience-annotations-0.12.0.jar
       * Swagger
    -    - swagger-annotations-1.6.2.jar
    +    - swagger-annotations-1.6.10.jar
       * Perfmark
    -    - perfmark-api-0.19.0.jar
    -  * Annotations
    -    - auto-service-annotations-1.0.jar
    -  * RabbitMQ Java Client
    -    - amqp-client-5.5.3.jar
    +    - perfmark-api-0.26.0.jar
       * Stream Lib
         - stream-2.9.5.jar
       * High Performance Primitive Collections for Java
    @@ -490,7 +490,7 @@ Protocol Buffers License
      * Protocol Buffers
        - protobuf-java-3.19.6.jar
        - protobuf-java-util-3.19.6.jar
    -   - proto-google-common-protos-2.0.1.jar
    +   - proto-google-common-protos-2.9.0.jar
     
     BSD 3-clause "New" or "Revised" License
       *  RE2J TD -- re2j-td-1.4.jar
    @@ -519,10 +519,7 @@ MIT License
      * JCL 1.2 Implemented Over SLF4J
        - jcl-over-slf4j-1.7.32.jar
      * Checker Qual
    -   - checker-qual-3.12.0.jar
    - * Annotations
    -   - animal-sniffer-annotations-1.19.jar
    -   - annotations-4.1.1.4.jar
    +   - checker-qual-3.33.0.jar
      * ScribeJava
        - scribejava-apis-6.9.0.jar
        - scribejava-core-6.9.0.jar
    @@ -593,7 +590,7 @@ Creative Commons Attribution License
     
     Bouncy Castle License
      * Bouncy Castle -- licenses/LICENSE-bouncycastle.txt
    -   - bcpkix-jdk15on-1.69.jar
    -   - bcprov-ext-jdk15on-1.69.jar
    -   - bcprov-jdk15on-1.69.jar
    -   - bcutil-jdk15on-1.69.jar
    +   - bcpkix-jdk18on-1.75.jar
    +   - bcprov-ext-jdk18on-1.75.jar
    +   - bcprov-jdk18on-1.75.jar
    +   - bcutil-jdk18on-1.75.jar
    diff --git a/pulsar-sql/presto-distribution/pom.xml b/pulsar-sql/presto-distribution/pom.xml
    index 48a1bf90662f6..dbd8ec200f36c 100644
    --- a/pulsar-sql/presto-distribution/pom.xml
    +++ b/pulsar-sql/presto-distribution/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       4.0.0
    -
       
    -    org.apache.pulsar
    +    io.streamnative
         pulsar-sql
         3.0.0-SNAPSHOT
       
    -
       pulsar-presto-distribution
       Pulsar SQL :: Pulsar Presto Distribution
    -
       
         false
         2.34
         2.6
         0.0.12
         3.0.5
    -    31.0.1-jre
    +    32.1.1-jre
         2.12.1
         2.5.1
         4.0.1
       
    -
       
         
           org.glassfish.jersey.core
    @@ -74,7 +70,6 @@
           jersey-client
           ${jersey.version}
         
    -
         
           io.trino
           trino-server-main
    @@ -103,13 +98,11 @@
             
           
         
    -
         
           io.trino
           trino-cli
           ${trino.version}
         
    -
         
           io.airlift
           launcher
    @@ -117,7 +110,6 @@
           tar.gz
           bin
         
    -
         
           io.airlift
           launcher
    @@ -125,13 +117,11 @@
           tar.gz
           properties
         
    -
         
           org.objenesis
           objenesis
           ${objenesis.version}
         
    -
         
           com.twitter.common
           objectsize
    @@ -143,56 +133,44 @@
             
           
         
    -
         
    -
         
           com.fasterxml.jackson.core
           jackson-core
         
    -
         
           com.fasterxml.jackson.core
           jackson-databind
         
    -
         
           com.fasterxml.jackson.core
           jackson-annotations
         
    -
         
           com.fasterxml.jackson.datatype
           jackson-datatype-joda
         
    -
         
           com.fasterxml.jackson.dataformat
           jackson-dataformat-yaml
         
    -
         
           com.fasterxml.jackson.datatype
           jackson-datatype-guava
         
    -
         
           com.fasterxml.jackson.datatype
           jackson-datatype-jdk8
         
    -
         
           com.fasterxml.jackson.datatype
           jackson-datatype-jsr310
         
    -
         
           com.fasterxml.jackson.dataformat
           jackson-dataformat-smile
         
    -
       
    -
       
         
           
    @@ -205,7 +183,6 @@
             netty
             3.10.6.Final
           
    -
           
             org.apache.maven
             maven-core
    @@ -295,7 +272,6 @@
           
         
       
    -
       
         
           
    @@ -305,7 +281,6 @@
               ${skipBuildDistribution}
             
           
    -
           
             org.apache.maven.plugins
             maven-assembly-plugin
    @@ -329,7 +304,6 @@
               
             
           
    -
           
             com.mycila
             license-maven-plugin
    @@ -354,7 +328,6 @@
           
         
       
    -
       
         
           skipBuildDistributionDisabled
    diff --git a/pulsar-sql/presto-distribution/src/assembly/assembly.xml b/pulsar-sql/presto-distribution/src/assembly/assembly.xml
    index 96c0421c71515..8439d60ba339e 100644
    --- a/pulsar-sql/presto-distribution/src/assembly/assembly.xml
    +++ b/pulsar-sql/presto-distribution/src/assembly/assembly.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    bin
    -    
    -        tar.gz
    -        dir
    -    
    -    false
    -    
    -        
    -            ${basedir}/LICENSE
    -            LICENSE
    -            .
    -            644
    -        
    -        
    -            ${basedir}/src/main/resources/launcher.properties
    -            launcher.properties
    -            bin/
    -            644
    -        
    -    
    -    
    -        
    -            ${basedir}/../presto-pulsar-plugin/target/pulsar-presto-connector/
    -            plugin/
    -        
    -        
    -            ${basedir}/src/main/resources/conf/
    -            conf/
    -        
    -    
    -    
    -        
    -            lib/
    -            true
    -            runtime
    -            
    -                io.airlift:launcher:tar.gz:bin:${airlift.version}
    -                io.airlift:launcher:tar.gz:properties:${airlift.version}
    -                *:tar.gz
    -            
    -        
    -        
    -            
    -            
    -                io.airlift:launcher:tar.gz:bin:${airlift.version}
    -            
    -            true
    -            755
    -        
    -        
    -            
    -            
    -                io.airlift:launcher:tar.gz:properties:${airlift.version}
    -            
    -            true
    -        
    -    
    -
    \ No newline at end of file
    +
    +  bin
    +  
    +    tar.gz
    +    dir
    +  
    +  false
    +  
    +    
    +      ${basedir}/LICENSE
    +      LICENSE
    +      .
    +      644
    +    
    +    
    +      ${basedir}/src/main/resources/launcher.properties
    +      launcher.properties
    +      bin/
    +      644
    +    
    +  
    +  
    +    
    +      ${basedir}/../presto-pulsar-plugin/target/pulsar-presto-connector/
    +      plugin/
    +    
    +    
    +      ${basedir}/src/main/resources/conf/
    +      conf/
    +    
    +  
    +  
    +    
    +      lib/
    +      true
    +      runtime
    +      
    +        io.airlift:launcher:tar.gz:bin:${airlift.version}
    +        io.airlift:launcher:tar.gz:properties:${airlift.version}
    +        *:tar.gz
    +      
    +    
    +    
    +      
    +      
    +        io.airlift:launcher:tar.gz:bin:${airlift.version}
    +      
    +      true
    +      755
    +    
    +    
    +      
    +      
    +        io.airlift:launcher:tar.gz:properties:${airlift.version}
    +      
    +      true
    +    
    +  
    +
    diff --git a/pulsar-sql/presto-distribution/src/main/resources/conf/catalog/pulsar.properties b/pulsar-sql/presto-distribution/src/main/resources/conf/catalog/pulsar.properties
    index f15cd2657e0b5..3d6e367d5782b 100644
    --- a/pulsar-sql/presto-distribution/src/main/resources/conf/catalog/pulsar.properties
    +++ b/pulsar-sql/presto-distribution/src/main/resources/conf/catalog/pulsar.properties
    @@ -113,10 +113,6 @@ pulsar.bookkeeper-explicit-interval=0
     # running in same sql worker. 0 is represents disable the cache, default is 0.
     pulsar.managed-ledger-cache-size-MB = 0
     
    -# Number of threads to be used for managed ledger tasks dispatching,
    -# default is Runtime.getRuntime().availableProcessors().
    -# pulsar.managed-ledger-num-worker-threads =
    -
     # Number of threads to be used for managed ledger scheduled tasks,
     # default is Runtime.getRuntime().availableProcessors().
     # pulsar.managed-ledger-num-scheduler-threads =
    diff --git a/pulsar-sql/presto-pulsar-plugin/pom.xml b/pulsar-sql/presto-pulsar-plugin/pom.xml
    index 11d2cbdfa9925..8a35373ebfb3a 100644
    --- a/pulsar-sql/presto-pulsar-plugin/pom.xml
    +++ b/pulsar-sql/presto-pulsar-plugin/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar
    -        pulsar-sql
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-presto-connector
    -    Pulsar SQL :: Pulsar Presto Connector
    -
    -    
    -
    -        
    -            ${project.groupId}
    -            pulsar-presto-connector-original
    -            ${project.version}
    -        
    -
    -        
    -            ${project.groupId}
    -            bouncy-castle-bc
    -            ${project.version}
    -            pkg
    -            true
    -        
    -    
    -
    -    
    -        
    -            
    -                org.apache.maven.plugins
    -                maven-assembly-plugin
    -                3.3.0
    -                
    -                    false
    -                    true
    -                    posix
    -                    
    -                        src/assembly/assembly.xml
    -                    
    -                
    -                
    -                    
    -                        package
    -                        package
    -                        
    -                            single
    -                        
    -                    
    -                
    -            
    -        
    -    
    -
    +
    +  4.0.0
    +  
    +    io.streamnative
    +    pulsar-sql
    +    3.0.0-SNAPSHOT
    +  
    +  pulsar-presto-connector
    +  Pulsar SQL :: Pulsar Presto Connector
    +  
    +    
    +      ${project.groupId}
    +      pulsar-presto-connector-original
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      bouncy-castle-bc
    +      ${project.version}
    +      pkg
    +      true
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-assembly-plugin
    +        3.3.0
    +        
    +          false
    +          true
    +          posix
    +          
    +            src/assembly/assembly.xml
    +          
    +        
    +        
    +          
    +            package
    +            package
    +            
    +              single
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        3.2.0
    +        
    +          
    +            default-jar
    +            package
    +            
    +              jar
    +            
    +          
    +          
    +            javadoc-jar
    +            package
    +            
    +              jar
    +            
    +            
    +              javadoc
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml b/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml
    index 6650abfda3fc3..649baf7318003 100644
    --- a/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml
    +++ b/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    bin
    -    
    -        tar.gz
    -        dir
    -    
    -    
    -        
    -            /
    -            false
    -            runtime
    -            
    -              jakarta.ws.rs:jakarta.ws.rs-api
    -            
    -        
    -    
    -
    \ No newline at end of file
    +
    +  bin
    +  
    +    tar.gz
    +    dir
    +  
    +  
    +    
    +      /
    +      false
    +      runtime
    +      
    +        jakarta.ws.rs:jakarta.ws.rs-api
    +      
    +    
    +  
    +
    diff --git a/pulsar-sql/presto-pulsar/pom.xml b/pulsar-sql/presto-pulsar/pom.xml
    index e61580f1fcf15..edabcbb839bb0 100644
    --- a/pulsar-sql/presto-pulsar/pom.xml
    +++ b/pulsar-sql/presto-pulsar/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar
    -        pulsar-sql
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-presto-connector-original
    -    Pulsar SQL - Pulsar Presto Connector
    -    Pulsar SQL :: Pulsar Presto Connector Packaging
    -
    -    
    -        2.1.2
    -        1.8.4
    -    
    -
    -    
    -        
    -            io.airlift
    -            bootstrap
    -            
    -                
    -                    org.apache.logging.log4j
    -                    log4j-to-slf4j
    -                
    -            
    -        
    -        
    -            io.airlift
    -            json
    -        
    -
    -        
    -            org.apache.avro
    -            avro
    -            ${avro.version}
    -        
    -
    -        
    -            ${project.groupId}
    -            pulsar-client-admin-original
    -            ${project.version}
    -        
    -
    -        
    -            ${project.groupId}
    -            managed-ledger
    -            ${project.version}
    -        
    -
    -        
    -            org.jctools
    -            jctools-core
    -            ${jctools.version}
    -        
    -
    -        
    -            com.dslplatform
    -            dsl-json
    -            ${dslJson.verson}
    -        
    -
    -        
    -            io.trino
    -            trino-plugin-toolkit
    -            ${trino.version}
    -        
    -
    -        
    -        
    -            io.trino
    -            trino-spi
    -            ${trino.version}
    -            provided
    -        
    -
    -        
    -          joda-time
    -          joda-time
    -          ${joda.version}
    -        
    -
    -        
    -            io.trino
    -            trino-record-decoder
    -            ${trino.version}
    -        
    -
    -        
    -            javax.annotation
    -            javax.annotation-api
    -            ${javax.annotation-api.version}
    -        
    -
    -        
    -            io.jsonwebtoken
    -            jjwt-impl
    -            ${jsonwebtoken.version}
    -            test
    -        
    -
    -        
    -            io.trino
    -            trino-main
    -            ${trino.version}
    -            test
    -        
    -
    -        
    -            io.trino
    -            trino-testing
    -            ${trino.version}
    -            test
    -        
    -
    -        
    -          org.apache.pulsar
    +
    +  4.0.0
    +  
    +    io.streamnative
    +    pulsar-sql
    +    3.0.0-SNAPSHOT
    +  
    +  pulsar-presto-connector-original
    +  Pulsar SQL - Pulsar Presto Connector
    +  Pulsar SQL :: Pulsar Presto Connector Packaging
    +  
    +    2.1.2
    +    1.8.4
    +  
    +  
    +    
    +      io.airlift
    +      bootstrap
    +      
    +        
    +          org.apache.logging.log4j
    +          log4j-to-slf4j
    +        
    +      
    +    
    +    
    +      io.airlift
    +      json
    +    
    +    
    +      org.apache.avro
    +      avro
    +      ${avro.version}
    +    
    +    
    +      ${project.groupId}
    +      pulsar-client-admin-original
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      managed-ledger
    +      ${project.version}
    +    
    +    
    +      org.jctools
    +      jctools-core
    +      ${jctools.version}
    +    
    +    
    +      com.dslplatform
    +      dsl-json
    +      ${dslJson.verson}
    +    
    +    
    +      io.trino
    +      trino-plugin-toolkit
    +      ${trino.version}
    +    
    +    
    +    
    +      io.trino
    +      trino-spi
    +      ${trino.version}
    +      provided
    +    
    +    
    +      joda-time
    +      joda-time
    +      ${joda.version}
    +    
    +    
    +      io.trino
    +      trino-record-decoder
    +      ${trino.version}
    +    
    +    
    +      javax.annotation
    +      javax.annotation-api
    +      ${javax.annotation-api.version}
    +    
    +    
    +      io.jsonwebtoken
    +      jjwt-impl
    +      ${jsonwebtoken.version}
    +      test
    +    
    +    
    +      io.trino
    +      trino-main
    +      ${trino.version}
    +      test
    +    
    +    
    +      io.trino
    +      trino-testing
    +      ${trino.version}
    +      test
    +    
    +    
    +      io.streamnative
    +      pulsar-broker
    +      ${project.version}
    +      test
    +    
    +    
    +      io.streamnative
    +      testmocks
    +      ${project.version}
    +      test
    +    
    +    
    +      org.eclipse.jetty
    +      jetty-http
    +      ${jetty.version}
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-shade-plugin
    +        
    +          
    +            ${shadePluginPhase}
    +            
    +              shade
    +            
    +            
    +              true
    +              true
    +              
    +                
    +                  io.streamnative:pulsar-client-original
    +                  io.streamnative:pulsar-client-admin-original
    +                  io.streamnative:managed-ledger
    +                  io.streamnative:pulsar-metadata
    +                  org.glassfish.jersey*:*
    +                  javax.ws.rs:*
    +                  javax.annotation:*
    +                  org.glassfish.hk2*:*
    +                  org.eclipse.jetty:*
    +                
    +              
    +              
    +                
    +                  io.streamnative:pulsar-client-original
    +                  
    +                    **
    +                  
    +                  
    +                    
    +                    org/bouncycastle/**
    +                  
    +                
    +              
    +              
    +                
    +                  org.glassfish
    +                  org.apache.pulsar.shade.org.glassfish
    +                
    +                
    +                  javax.ws
    +                  org.apache.pulsar.shade.javax.ws
    +                
    +                
    +                  javax.annotation
    +                  org.apache.pulsar.shade.javax.annotation
    +                
    +                
    +                  jersey
    +                  org.apache.pulsar.shade.jersey
    +                
    +                
    +                  org.eclipse.jetty
    +                  org.apache.pulsar.shade.org.eclipse.jetty
    +                
    +              
    +              
    +                
    +                
    +              
    +            
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      
    +      test-jar-dependencies
    +      
    +        
    +          maven.test.skip
    +          !true
    +        
    +      
    +      
    +        
    +          ${project.groupId}
               pulsar-broker
               ${project.version}
    +          test-jar
               test
             
    -
    -        
    -          org.apache.pulsar
    -          testmocks
    -          ${project.version}
    -          test
    -        
    -
    -        
    -          org.eclipse.jetty
    -          jetty-http
    -          ${jetty.version}
    -          test
    -        
    -
    -    
    -
    -    
    -        
    -            
    -                org.apache.maven.plugins
    -                maven-shade-plugin
    -                
    -                    
    -                        ${shadePluginPhase}
    -                        
    -                            shade
    -                        
    -                        
    -                            true
    -                            true
    -
    -                            
    -                                
    -                                    org.apache.pulsar:pulsar-client-original
    -                                    org.apache.pulsar:pulsar-client-admin-original
    -                                    org.apache.pulsar:managed-ledger
    -                                    org.apache.pulsar:pulsar-metadata
    -
    -                                    org.glassfish.jersey*:*
    -                                    javax.ws.rs:*
    -                                    javax.annotation:*
    -                                    org.glassfish.hk2*:*
    -
    -                                    org.eclipse.jetty:*
    -
    -                                
    -                            
    -                            
    -                                
    -                                    org.apache.pulsar:pulsar-client-original
    -                                    
    -                                        **
    -                                    
    -                                    
    -                                        
    -                                        org/bouncycastle/**
    -                                    
    -                                
    -                            
    -                            
    -                                
    -                                    org.glassfish
    -                                    org.apache.pulsar.shade.org.glassfish
    -                                
    -                                
    -                                    javax.ws
    -                                    org.apache.pulsar.shade.javax.ws
    -                                
    -                                
    -                                    javax.annotation
    -                                    org.apache.pulsar.shade.javax.annotation
    -                                
    -                                
    -                                    jersey
    -                                    org.apache.pulsar.shade.jersey
    -                                
    -                                
    -                                    org.eclipse.jetty
    -                                    org.apache.pulsar.shade.org.eclipse.jetty
    -                                
    -
    -                            
    -                            
    -                                
    -                                
    -                            
    -                        
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            
    -            test-jar-dependencies
    -            
    -                
    -                    maven.test.skip
    -                    !true
    -                
    -            
    -            
    -                
    -                    ${project.groupId}
    -                    pulsar-broker
    -                    ${project.version}
    -                    test-jar
    -                    test
    -                
    -            
    -        
    -    
    +      
    +    
    +  
     
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
    index f86335ae3780b..42a69b142e4d3 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
    @@ -767,7 +767,7 @@ private void initEntryCacheSizeAllocator(PulsarConnectorConfig connectorConfig)
                         connectorConfig.getMaxSplitQueueSizeBytes() / 2);
                 this.messageQueueCacheSizeAllocator = new NoStrictCacheSizeAllocator(
                         connectorConfig.getMaxSplitQueueSizeBytes() / 2);
    -            log.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes {}.",
    +            log.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes %d.",
                         connectorConfig.getMaxSplitQueueSizeBytes());
             } else {
                 this.entryQueueCacheSizeAllocator = new NullCacheSizeAllocator();
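
The placeholder switch above (and the matching %s change in PulsarSqlSchemaInfoProvider below) suggests these classes log through io.airlift.log.Logger, which formats messages with String.format-style specifiers, so SLF4J-style {} markers would be printed literally. A minimal sketch of the difference, assuming both io.airlift.log and SLF4J are on the classpath (the class name is illustrative):

    import io.airlift.log.Logger;

    public class PlaceholderSketch {
        private static final Logger AIRLIFT_LOG = Logger.get(PlaceholderSketch.class);
        private static final org.slf4j.Logger SLF4J_LOG =
                org.slf4j.LoggerFactory.getLogger(PlaceholderSketch.class);

        public static void main(String[] args) {
            long maxBytes = 1024L;
            // airlift's Logger delegates to String.format, so %d/%s are required:
            AIRLIFT_LOG.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes %d.", maxBytes);
            // SLF4J substitutes positional {} placeholders instead:
            SLF4J_LOG.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes {}.", maxBytes);
        }
    }
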
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java
    index e2d030d2d7f1b..673ea2b3940d9 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java
     @@ -74,7 +74,7 @@ public CompletableFuture<SchemaInfo> getSchemaByVersion(byte[] schemaVersion) {
                 }
                 return cache.get(BytesSchemaVersion.of(schemaVersion));
             } catch (ExecutionException e) {
    -            LOG.error("Can't get generic schema for topic {} schema version {}",
    +            LOG.error("Can't get generic schema for topic %s schema version %s",
                         topicName.toString(), new String(schemaVersion, StandardCharsets.UTF_8), e);
                 return FutureUtil.failedFuture(e.getCause());
             }
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
    index 73081f8948a51..1672d5f144817 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
    @@ -54,6 +54,7 @@
     import io.trino.spi.type.Timestamps;
     import io.trino.spi.type.TinyintType;
     import io.trino.spi.type.Type;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarbinaryType;
     import io.trino.spi.type.VarcharType;
     import java.math.BigInteger;
    @@ -61,6 +62,7 @@
     import java.util.List;
     import java.util.Map;
     import java.util.Set;
    +import java.util.UUID;
     import org.apache.avro.generic.GenericEnumSymbol;
     import org.apache.avro.generic.GenericFixed;
     import org.apache.avro.generic.GenericRecord;
    @@ -87,7 +89,8 @@ public class PulsarAvroColumnDecoder {
                 TimestampType.TIMESTAMP_MILLIS,
                 DateType.DATE,
                 TimeType.TIME_MILLIS,
    -            VarbinaryType.VARBINARY);
    +            VarbinaryType.VARBINARY,
    +            UuidType.UUID);
     
         private final Type columnType;
         private final String columnMapping;
    @@ -255,6 +258,10 @@ private static Slice getSlice(Object value, Type type, String columnName) {
                 }
             }
     
    +        if (type instanceof UuidType) {
    +            return UuidType.javaUuidToTrinoUuid(UUID.fromString(value.toString()));
    +        }
    +
             throw new TrinoException(DECODER_CONVERSION_NOT_SUPPORTED,
                     format("cannot decode object of '%s' as '%s' for column '%s'",
                             value.getClass(), type, columnName));
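
The new UuidType branch above relies on Trino's helper to pack a java.util.UUID into the 16-byte Slice representation the engine expects for UUID columns. A minimal round-trip sketch, assuming io.trino:trino-spi (and its airlift Slice dependency) is available; the literal UUID string is just an example value:

    import io.airlift.slice.Slice;
    import io.trino.spi.type.UuidType;
    import java.util.UUID;

    public class UuidSliceSketch {
        public static void main(String[] args) {
            // Avro surfaces the uuid logical type as a CharSequence, so parse it first...
            Object avroValue = "123e4567-e89b-12d3-a456-426614174000";
            Slice trinoUuid = UuidType.javaUuidToTrinoUuid(UUID.fromString(avroValue.toString()));
            // ...and convert back when reading the value out of a Trino block.
            UUID roundTripped = UuidType.trinoUuidToJavaUuid(trinoUuid);
            System.out.println(roundTripped);
        }
    }
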
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
    index 3072bf9441b2c..e6eb6b7f2f947 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
    @@ -44,6 +44,7 @@
     import io.trino.spi.type.TypeManager;
     import io.trino.spi.type.TypeSignature;
     import io.trino.spi.type.TypeSignatureParameter;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarbinaryType;
     import io.trino.spi.type.VarcharType;
     import java.util.List;
    @@ -121,6 +122,10 @@ private Type parseAvroPrestoType(String fieldName, Schema schema) {
             LogicalType logicalType  = schema.getLogicalType();
             switch (type) {
                 case STRING:
    +                if (logicalType != null && logicalType.equals(LogicalTypes.uuid())) {
    +                    return UuidType.UUID;
    +                }
    +                return createUnboundedVarcharType();
                 case ENUM:
                     return createUnboundedVarcharType();
                 case NULL:
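
The STRING case above now returns UuidType.UUID only when the Avro schema carries the uuid logical type; plain strings (and enums) still map to unbounded VARCHAR. A short sketch of how the two schemas differ when declared with the Avro API (illustrative only):

    import org.apache.avro.LogicalTypes;
    import org.apache.avro.Schema;

    public class UuidLogicalTypeSketch {
        public static void main(String[] args) {
            // A bare string field carries no logical type and decodes as VARCHAR.
            Schema plainString = Schema.create(Schema.Type.STRING);
            System.out.println(plainString.getLogicalType());            // null

            // The same primitive tagged with the uuid logical type decodes as UUID.
            Schema uuidString = LogicalTypes.uuid().addToSchema(Schema.create(Schema.Type.STRING));
            System.out.println(uuidString.getLogicalType().getName());   // "uuid"
        }
    }
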
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java
    index 905e3bd6becb4..8e744e3b1229c 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java
    @@ -58,6 +58,7 @@
     import io.trino.spi.type.Timestamps;
     import io.trino.spi.type.TinyintType;
     import io.trino.spi.type.Type;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarbinaryType;
     import io.trino.spi.type.VarcharType;
     import java.util.Iterator;
    @@ -126,7 +127,8 @@ private boolean isSupportedType(Type type) {
                     TimestampType.TIMESTAMP_MILLIS,
                     DateType.DATE,
                     TimeType.TIME_MILLIS,
    -                RealType.REAL
    +                RealType.REAL,
    +                UuidType.UUID
             ).contains(type)) {
                 return true;
             }
    diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
    index 0d5cc2d262dfe..737eb608d82d6 100644
    --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
    +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
    @@ -44,6 +44,7 @@
     import io.trino.spi.type.TypeManager;
     import io.trino.spi.type.TypeSignature;
     import io.trino.spi.type.TypeSignatureParameter;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarbinaryType;
     import io.trino.spi.type.VarcharType;
     import java.util.List;
    @@ -121,6 +122,10 @@ private Type parseJsonPrestoType(String fieldName, Schema schema) {
             LogicalType logicalType  = schema.getLogicalType();
             switch (type) {
                 case STRING:
    +                if (logicalType != null && logicalType.equals(LogicalTypes.uuid())) {
    +                    return UuidType.UUID;
    +                }
    +                return createUnboundedVarcharType();
                 case ENUM:
                     return createUnboundedVarcharType();
                 case NULL:
    diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java
    index 9119ffed4e28f..7b550b7270f37 100644
    --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java
    +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java
    @@ -38,6 +38,7 @@
     import org.apache.pulsar.client.admin.PulsarAdminBuilder;
     import org.apache.pulsar.client.admin.PulsarAdminException;
     import org.apache.pulsar.client.api.AuthenticationFactory;
    +import org.apache.pulsar.client.impl.auth.AuthenticationToken;
     import org.apache.pulsar.common.policies.data.AuthAction;
     import org.apache.pulsar.common.policies.data.ClusterData;
     import org.apache.pulsar.common.policies.data.TenantInfoImpl;
    @@ -63,6 +64,9 @@ public void setup() throws Exception {
             conf.setProperties(properties);
             conf.setSuperUserRoles(Sets.newHashSet(SUPER_USER_ROLE));
             conf.setClusterName("c1");
    +        conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName());
    +        conf.setBrokerClientAuthenticationParameters("token:" + AuthTokenUtils
    +                .createToken(secretKey, SUPER_USER_ROLE, Optional.empty()));
             internalSetup();
     
             admin.clusters().createCluster("c1", ClusterData.builder().build());
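
The two setters added above make the embedded broker authenticate its own internal client connections with a token minted for the superuser role, using the same AuthTokenUtils helper the test already relies on. A minimal sketch of producing such a token, assuming the broker module's AuthTokenUtils and jjwt's SignatureAlgorithm are available (the role name is illustrative):

    import io.jsonwebtoken.SignatureAlgorithm;
    import java.util.Optional;
    import javax.crypto.SecretKey;
    import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils;

    public class SuperUserTokenSketch {
        public static void main(String[] args) {
            SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256);
            // No expiry (Optional.empty()), so the token stays valid for the whole test run.
            String token = AuthTokenUtils.createToken(secretKey, "superuser", Optional.empty());
            System.out.println("token:" + token);
        }
    }
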
    diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnectorConfig.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnectorConfig.java
    index 4f9dcf03979f7..b2b69fca90e5b 100644
    --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnectorConfig.java
    +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnectorConfig.java
    @@ -18,7 +18,13 @@
      */
     package org.apache.pulsar.sql.presto;
     
    +import io.airlift.bootstrap.Bootstrap;
    +import io.airlift.json.JsonModule;
    +import io.trino.spi.type.TypeManager;
    +import java.util.HashMap;
    +import java.util.Map;
     import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl;
    +import org.mockito.Mockito;
     import org.testng.Assert;
     import org.testng.annotations.Test;
     
    @@ -100,4 +106,34 @@ public void testGetOffloadPolices() throws Exception {
             Assert.assertEquals(offloadPolicies.getS3ManagedLedgerOffloadServiceEndpoint(), endpoint);
         }
     
    +    @Test
    +    public void testAnnotatedConfigurations() {
    +        Bootstrap app = new Bootstrap(
    +                new JsonModule(),
    +                new PulsarConnectorModule("connectorId", Mockito.mock(TypeManager.class)));
    +
     +        Map<String, String> config = new HashMap<>();
    +
    +        config.put("pulsar.managed-ledger-offload-driver", "aws-s3");
    +        config.put("pulsar.offloaders-directory", "/pulsar/offloaders");
    +        config.put("pulsar.managed-ledger-offload-max-threads", "2");
    +        config.put("pulsar.offloader-properties", "{\"s3ManagedLedgerOffloadBucket\":\"offload-bucket\","
    +                + "\"s3ManagedLedgerOffloadRegion\":\"us-west-2\","
    +                + "\"s3ManagedLedgerOffloadServiceEndpoint\":\"http://s3.amazonaws.com\"}");
    +        config.put("pulsar.auth-plugin", "org.apache.pulsar.client.impl.auth.AuthenticationToken");
    +        config.put("pulsar.auth-params", "params");
    +        config.put("pulsar.tls-allow-insecure-connection", "true");
    +        config.put("pulsar.tls-hostname-verification-enable", "true");
    +        config.put("pulsar.tls-trust-cert-file-path", "/path");
    +        config.put("pulsar.bookkeeper-num-io-threads", "10");
    +        config.put("pulsar.bookkeeper-num-worker-threads", "10");
    +        config.put("pulsar.managed-ledger-num-scheduler-threads", "10");
    +        config.put("pulsar.stats-provider", "org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider");
    +        config.put("pulsar.stats-provider-configs", "{\"httpServerEnabled\":\"false\","
    +                + "\"prometheusStatsHttpPort\":\"9092\","
    +                + "\"prometheusStatsHttpEnable\":\"true\"}");
    +
    +        app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
    +    }
    +
     }
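
The new testAnnotatedConfigurations case drives Airlift's Bootstrap, which fails initialize() when a supplied key has no matching @Config setter, so typos in the pulsar.* catalog property names surface as test failures. A minimal sketch of the same pattern against a hypothetical config class (DemoConfig and demo.directory are not Pulsar names):

    import com.google.inject.Binder;
    import com.google.inject.Module;
    import io.airlift.bootstrap.Bootstrap;
    import io.airlift.configuration.Config;
    import java.util.Map;
    import static io.airlift.configuration.ConfigBinder.configBinder;

    public class BootstrapConfigSketch {
        public static class DemoConfig {
            private String directory;

            public String getDirectory() { return directory; }

            @Config("demo.directory")
            public DemoConfig setDirectory(String directory) {
                this.directory = directory;
                return this;
            }
        }

        public static void main(String[] args) {
            Module module = (Binder binder) -> configBinder(binder).bindConfig(DemoConfig.class);
            new Bootstrap(module)
                    .doNotInitializeLogging()
                    .setRequiredConfigurationProperties(Map.of("demo.directory", "/tmp/demo"))
                    .initialize();   // throws if a property is unknown or cannot be bound
        }
    }
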
    diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
    index 4561282c67196..0dec76b3d4dec 100644
    --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
    +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
    @@ -19,6 +19,7 @@
     package org.apache.pulsar.sql.presto.decoder;
     
     import java.math.BigDecimal;
    +import java.util.UUID;
     import lombok.Data;
     
     import java.util.List;
    @@ -55,6 +56,9 @@ public static enum TestEnum {
         public Map mapField;
         public CompositeRow compositeRow;
     
    +    @org.apache.avro.reflect.AvroSchema("{\"type\":\"string\",\"logicalType\":\"uuid\"}")
    +    public UUID uuidField;
    +
         public static class TestRow {
             public String stringField;
             public int intField;
    diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
    index c4e7009b9465b..5f9df96619b9f 100644
    --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
    +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
    @@ -44,6 +44,7 @@
     import io.trino.spi.type.Timestamps;
     import io.trino.spi.type.Type;
     import io.trino.spi.type.TypeSignatureParameter;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarcharType;
     import java.math.BigDecimal;
     import java.time.LocalDate;
    @@ -55,6 +56,7 @@
     import java.util.HashSet;
     import java.util.List;
     import java.util.Map;
    +import java.util.UUID;
     import org.apache.pulsar.client.impl.schema.AvroSchema;
     import org.apache.pulsar.client.impl.schema.generic.GenericAvroRecord;
     import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
    @@ -90,6 +92,7 @@ public void testPrimitiveType() {
             message.longField = 222L;
             message.timestampField = System.currentTimeMillis();
             message.enumField = DecoderTestMessage.TestEnum.TEST_ENUM_1;
    +        message.uuidField = UUID.randomUUID();
     
             LocalTime now = LocalTime.now(ZoneId.systemDefault());
             message.timeField = now.toSecondOfDay() * 1000;
    @@ -137,6 +140,10 @@ public void testPrimitiveType() {
             PulsarColumnHandle timeFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
                     "timeField", TIME_MILLIS, false, false, "timeField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
             checkValue(decodedRow, timeFieldColumnHandle, (long) message.timeField * Timestamps.PICOSECONDS_PER_MILLISECOND);
    +
    +        PulsarColumnHandle uuidHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
    +                "uuidField", UuidType.UUID, false, false, "uuidField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
    +        checkValue(decodedRow, uuidHandle, UuidType.javaUuidToTrinoUuid(message.uuidField));
         }
     
         @Test
    diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java
    index 4afad9b318fc5..32e71a53444cf 100644
    --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java
    +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java
    @@ -44,6 +44,7 @@
     import io.trino.spi.type.Timestamps;
     import io.trino.spi.type.Type;
     import io.trino.spi.type.TypeSignatureParameter;
    +import io.trino.spi.type.UuidType;
     import io.trino.spi.type.VarcharType;
     import java.math.BigDecimal;
     import java.time.LocalDate;
    @@ -55,6 +56,7 @@
     import java.util.HashSet;
     import java.util.List;
     import java.util.Map;
    +import java.util.UUID;
     import org.apache.pulsar.client.impl.schema.JSONSchema;
     import org.apache.pulsar.client.impl.schema.generic.GenericJsonRecord;
     import org.apache.pulsar.client.impl.schema.generic.GenericJsonSchema;
    @@ -98,6 +100,8 @@ public void testPrimitiveType() {
             LocalDate epoch = LocalDate.ofEpochDay(0);
             message.dateField = Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate));
     
    +        message.uuidField = UUID.randomUUID();
    +
             ByteBuf payload = io.netty.buffer.Unpooled
                     .copiedBuffer(schema.encode(message));
              Map<DecoderColumnHandle, FieldValueProvider> decodedRow = pulsarRowDecoder.decodeRow(payload).get();
    @@ -137,6 +141,10 @@ public void testPrimitiveType() {
             PulsarColumnHandle timeFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
                     "timeField", TIME_MILLIS, false, false, "timeField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
             checkValue(decodedRow, timeFieldColumnHandle, (long) message.timeField * Timestamps.PICOSECONDS_PER_MILLISECOND);
    +
    +        PulsarColumnHandle uuidHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
    +                "uuidField", UuidType.UUID, false, false, "uuidField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
    +        checkValue(decodedRow, uuidHandle, message.uuidField.toString());
         }
     
         @Test
    diff --git a/pulsar-testclient/pom.xml b/pulsar-testclient/pom.xml
    index ceb64ecbadd90..d5d84c2ce91f4 100644
    --- a/pulsar-testclient/pom.xml
    +++ b/pulsar-testclient/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -	4.0.0
    -	
    -		org.apache.pulsar
    -		pulsar
    -		3.0.0-SNAPSHOT
    -		..
    -	
    -
    -	pulsar-testclient
    -	Pulsar Test Client
    -	Pulsar Test Client
    -
    -	
    -		
    -			${project.groupId}
    -			testmocks
    -			${project.version}
    -			test
    -		
    -
    -		
    -			org.apache.zookeeper
    -			zookeeper
    -			
    -				
    -					ch.qos.logback
    -					logback-core
    -				
    -				
    -					ch.qos.logback
    -					logback-classic
    -				
    -				
    -					io.netty
    -					netty-tcnative
    -				
    -			
    -		
    -
    -		
    -			${project.groupId}
    -			pulsar-client-admin-original
    -			${project.version}
    -		
    -
    -		
    -			${project.groupId}
    -			pulsar-client-original
    -			${project.version}
    -		
    -
    -		
    -			${project.groupId}
    -			pulsar-client-messagecrypto-bc
    -			${project.version}
    -			true
    -		
    -
    -		
    -			${project.groupId}
    -			pulsar-broker
    -			${project.version}
    -		
    -
    -		
    -			commons-configuration
    -			commons-configuration
    -		
    -
    -		
    -			com.beust
    -			jcommander
    -			compile
    -		
    -
    -		
    -			org.hdrhistogram
    -			HdrHistogram
    -		
    -
    -		
    -			com.fasterxml.jackson.core
    -			jackson-databind
    -		
    -
    -		
    -			org.awaitility
    -			awaitility
    -			test
    -		
    -
    -
    -	
    -
    -	
    -		
    -			
    -			test-jar-dependencies
    -			
    -				
    -					maven.test.skip
    -					!true
    -				
    -			
    -			
    -				
    -					${project.groupId}
    -					pulsar-broker
    -					${project.version}
    -					test-jar
    -					test
    -				
    -			
    -		
    -	
    -
    -	
    -		
    -			
    -				org.gaul
    -				modernizer-maven-plugin
    -				
    -					true
    -					8
    -				
    -				
    -					
    -						modernizer
    -						verify
    -						
    -							modernizer
    -						
    -					
    -				
    -			
    -
    -			
    -				org.apache.maven.plugins
    -				maven-checkstyle-plugin
    -				
    -					
    -						checkstyle
    -						verify
    -						
    -							check
    -						
    -					
    -				
    -			
    -		
    -	
    +
    +  4.0.0
    +  
    +    io.streamnative
    +    pulsar
    +    3.0.0-SNAPSHOT
    +    ..
    +  
    +  pulsar-testclient
    +  Pulsar Test Client
    +  Pulsar Test Client
    +  
    +    
    +      ${project.groupId}
    +      testmocks
    +      ${project.version}
    +      test
    +    
    +    
    +      org.apache.zookeeper
    +      zookeeper
    +      
    +        
    +          ch.qos.logback
    +          logback-core
    +        
    +        
    +          ch.qos.logback
    +          logback-classic
    +        
    +        
    +          io.netty
    +          netty-tcnative
    +        
    +      
    +    
    +    
    +      ${project.groupId}
    +      pulsar-client-admin-original
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      pulsar-client-original
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      pulsar-client-messagecrypto-bc
    +      ${project.version}
    +      true
    +    
    +    
    +      ${project.groupId}
    +      pulsar-broker
    +      ${project.version}
    +    
    +    
    +      commons-configuration
    +      commons-configuration
    +    
    +    
    +      com.beust
    +      jcommander
    +      compile
    +    
    +    
    +      org.hdrhistogram
    +      HdrHistogram
    +    
    +    
    +      com.fasterxml.jackson.core
    +      jackson-databind
    +    
    +    
    +      org.awaitility
    +      awaitility
    +      test
    +    
    +    
    +      com.github.tomakehurst
    +      wiremock-jre8
    +      ${wiremock.version}
    +      test
    +    
    +  
    +  
    +    
    +      
    +      test-jar-dependencies
    +      
    +        
    +          maven.test.skip
    +          !true
    +        
    +      
    +      
    +        
    +          ${project.groupId}
    +          pulsar-broker
    +          ${project.version}
    +          test-jar
    +          test
    +        
    +      
    +    
    +  
    +  
    +    
    +      
    +        org.gaul
    +        modernizer-maven-plugin
    +        
    +          true
    +          8
    +        
    +        
    +          
    +            modernizer
    +            verify
    +            
    +              modernizer
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-checkstyle-plugin
    +        
    +          
    +            checkstyle
    +            verify
    +            
    +              check
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java
    index bbe535df5e289..3dc7008a41a49 100644
    --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java
    +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java
    @@ -60,7 +60,7 @@
      */
     public class LoadSimulationController {
         private static final Logger log = LoggerFactory.getLogger(LoadSimulationController.class);
    -    private static final String QUOTA_ROOT = "/loadbalance/resource-quota/namespace";
    +    private static final String QUOTA_ROOT = "/loadbalance/resource-quota";
         private static final String BUNDLE_DATA_ROOT = "/loadbalance/bundle-data";
     
         // Input streams for each client to send commands through.
    diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java
    index e40e9610bf42f..9c02ab05409b3 100644
    --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java
    +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java
    @@ -30,7 +30,6 @@
     import org.apache.pulsar.client.api.ClientBuilder;
     import org.apache.pulsar.client.api.PulsarClient;
     import org.apache.pulsar.client.api.PulsarClientException;
    -import org.apache.pulsar.client.api.SizeUnit;
     import org.apache.pulsar.common.util.DirectMemoryUtils;
     import org.slf4j.Logger;
     
    @@ -67,7 +66,6 @@ public static ClientBuilder createClientBuilderFromArguments(PerformanceBaseArgu
                 throws PulsarClientException.UnsupportedAuthenticationException {
     
             ClientBuilder clientBuilder = PulsarClient.builder()
    -                .memoryLimit(0, SizeUnit.BYTES)
                     .serviceUrl(arguments.serviceURL)
                     .connectionsPerBroker(arguments.maxConnections)
                     .ioThreads(arguments.ioThreads)
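
Dropping the memoryLimit(0, SizeUnit.BYTES) call means the perf tools no longer disable the client-side memory limit; the builder's default limit (64 MB in recent client releases) applies instead. For reference, this is the removed call shown in isolation (the service URL is a placeholder):

    import org.apache.pulsar.client.api.PulsarClient;
    import org.apache.pulsar.client.api.PulsarClientException;
    import org.apache.pulsar.client.api.SizeUnit;

    public class MemoryLimitSketch {
        public static void main(String[] args) throws PulsarClientException {
            // memoryLimit(0, SizeUnit.BYTES) turns the client-side memory limit off entirely.
            PulsarClient unbounded = PulsarClient.builder()
                    .serviceUrl("pulsar://localhost:6650")
                    .memoryLimit(0, SizeUnit.BYTES)
                    .build();
            unbounded.close();
        }
    }
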
    diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
    index 0c56f1d5c736d..3f75d38fb1e76 100644
    --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
    +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
    @@ -480,6 +480,7 @@ public static void main(String[] args) throws Exception {
     
                 oldTime = now;
             }
    +        PerfClientUtils.exit(0);
         }
     
         private static void executorShutdownNow() {
    @@ -553,6 +554,7 @@ private static void runProducer(int producerId,
                                         byte[] payloadBytes,
                                         CountDownLatch doneLatch) {
             PulsarClient client = null;
    +        boolean produceEnough = false;
             try {
                 // Now processing command line arguments
                 List<Future<Producer<byte[]>>> futures = new ArrayList<>();
    @@ -613,10 +615,13 @@ private static void runProducer(int producerId,
                     }
                 }
                 // Send messages on all topics/producers
    -            long totalSent = 0;
    +            AtomicLong totalSent = new AtomicLong(0);
                 AtomicLong numMessageSend = new AtomicLong(0);
                 Semaphore numMsgPerTxnLimit = new Semaphore(arguments.numMessagesPerTransaction);
                 while (true) {
    +                if (produceEnough) {
    +                    break;
    +                }
                     for (Producer<byte[]> producer : producers) {
                         if (arguments.testTime > 0) {
                             if (System.nanoTime() > testEndTime) {
    @@ -624,17 +629,19 @@ private static void runProducer(int producerId,
                                         + "--------------", arguments.testTime);
                                 doneLatch.countDown();
                                 Thread.sleep(5000);
    -                            PerfClientUtils.exit(0);
    +                            produceEnough = true;
    +                            break;
                             }
                         }
     
                         if (numMessages > 0) {
    -                        if (totalSent++ >= numMessages) {
    +                        if (totalSent.get() >= numMessages) {
                                 log.info("------------- DONE (reached the maximum number: {} of production) --------------"
                                         , numMessages);
                                 doneLatch.countDown();
                                 Thread.sleep(5000);
    -                            PerfClientUtils.exit(0);
    +                            produceEnough = true;
    +                            break;
                             }
                         }
                         rateLimiter.acquire();
    @@ -646,7 +653,7 @@ private static void runProducer(int producerId,
     
                         if (arguments.payloadFilename != null) {
                             if (messageFormatter != null) {
    -                            payloadData = messageFormatter.formatMessage(arguments.producerName, totalSent,
    +                            payloadData = messageFormatter.formatMessage(arguments.producerName, totalSent.get(),
                                         payloadByteList.get(ThreadLocalRandom.current().nextInt(payloadByteList.size())));
                             } else {
                                 payloadData = payloadByteList.get(
    @@ -680,13 +687,13 @@ private static void runProducer(int producerId,
                         if (msgKeyMode == MessageKeyGenerationMode.random) {
                             messageBuilder.key(String.valueOf(ThreadLocalRandom.current().nextInt()));
                         } else if (msgKeyMode == MessageKeyGenerationMode.autoIncrement) {
    -                        messageBuilder.key(String.valueOf(totalSent));
    +                        messageBuilder.key(String.valueOf(totalSent.get()));
                         }
                         PulsarClient pulsarClient = client;
                         messageBuilder.sendAsync().thenRun(() -> {
                             bytesSent.add(payloadData.length);
                             messagesSent.increment();
    -
    +                        totalSent.incrementAndGet();
                             totalMessagesSent.increment();
                             totalBytesSent.add(payloadData.length);
     
    @@ -763,10 +770,12 @@ private static void runProducer(int producerId,
             } catch (Throwable t) {
                 log.error("Got error", t);
             } finally {
    +            if (!produceEnough) {
    +                doneLatch.countDown();
    +            }
                 if (null != client) {
                     try {
                         client.close();
    -                    PerfClientUtils.exit(1);
                     } catch (PulsarClientException e) {
                         log.error("Failed to close test client", e);
                     }
    diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/Oauth2PerformanceTransactionTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/Oauth2PerformanceTransactionTest.java
    index 05c9b069aca87..d19c4b3d104b7 100644
    --- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/Oauth2PerformanceTransactionTest.java
    +++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/Oauth2PerformanceTransactionTest.java
    @@ -32,6 +32,7 @@
     import java.util.UUID;
     import java.util.concurrent.CountDownLatch;
     import java.util.concurrent.TimeUnit;
    +import org.apache.pulsar.broker.auth.MockOIDCIdentityProvider;
     import org.apache.pulsar.broker.authentication.AuthenticationProviderToken;
     import org.apache.pulsar.client.admin.PulsarAdmin;
     import org.apache.pulsar.client.api.Consumer;
    @@ -42,7 +43,6 @@
     import org.apache.pulsar.client.api.Schema;
     import org.apache.pulsar.client.api.SubscriptionInitialPosition;
     import org.apache.pulsar.client.api.SubscriptionType;
    -import org.apache.pulsar.client.api.TokenOauth2AuthenticatedProducerConsumerTest;
     import org.apache.pulsar.common.naming.NamespaceName;
     import org.apache.pulsar.common.naming.SystemTopicNames;
     import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
    @@ -61,32 +61,25 @@ public class Oauth2PerformanceTransactionTest extends ProducerConsumerBase {
         private final String testNamespace = "perf";
         private final String myNamespace = testTenant + "/" + testNamespace;
         private final String testTopic = "persistent://" + myNamespace + "/test-";
    -    private static final Logger log = LoggerFactory.getLogger(TokenOauth2AuthenticatedProducerConsumerTest.class);
    -
    -    // public key in oauth2 server to verify the client passed in token. get from https://jwt.io/
    -    private final String TOKEN_TEST_PUBLIC_KEY = "data:;base64,MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2tZd/"
    -            + "4gJda3U2Pc3tpgRAN7JPGWx/Gn17v/0IiZlNNRbP/Mmf0Vc6G1qsnaRaWNWOR+t6/a6ekFHJMikQ1N2X6yfz4UjMc8/G2FDPRm"
    -            + "WjA+GURzARjVhxc/BBEYGoD0Kwvbq/u9CZm2QjlKrYaLfg3AeB09j0btNrDJ8rBsNzU6AuzChRvXj9IdcE/A/4N/UQ+S9cJ4UXP6"
    -            + "NJbToLwajQ5km+CnxdGE6nfB7LWHvOFHjn9C2Rb9e37CFlmeKmIVFkagFM0gbmGOb6bnGI8Bp/VNGV0APef4YaBvBTqwoZ1Z4aDH"
    -            + "y5eRxXfAMdtBkBupmBXqL6bpd15XRYUbu/7ck9QIDAQAB";
    -
    -    private final String ADMIN_ROLE = "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x@clients";
    +    private static final Logger log = LoggerFactory.getLogger(Oauth2PerformanceTransactionTest.class);
     
         // Credentials File, which contains "client_id" and "client_secret"
         private final String CREDENTIALS_FILE = "./src/test/resources/authentication/token/credentials_file.json";
     
         private final String authenticationPlugin = "org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2";
     
    +    private MockOIDCIdentityProvider server;
         private String authenticationParameters;
     
         @BeforeMethod(alwaysRun = true)
         @Override
         protected void setup() throws Exception {
    +        server = new MockOIDCIdentityProvider("a-client-secret", "my-test-audience", 30000);
             Path path = Paths.get(CREDENTIALS_FILE).toAbsolutePath();
             HashMap<String, Object> params = new HashMap<>();
    -        params.put("issuerUrl", new URL("https://dev-kt-aa9ne.us.auth0.com"));
    +        params.put("issuerUrl", server.getIssuer());
             params.put("privateKey", path.toUri().toURL());
    -        params.put("audience", "https://dev-kt-aa9ne.us.auth0.com/api/v2/");
    +        params.put("audience", "my-test-audience");
             ObjectMapper jsonMapper = ObjectMapperFactory.create();
             authenticationParameters = jsonMapper.writeValueAsString(params);
     
    @@ -96,7 +89,7 @@ protected void setup() throws Exception {
             conf.setAuthenticationRefreshCheckSeconds(5);
     
             Set<String> superUserRoles = new HashSet<>();
    -        superUserRoles.add(ADMIN_ROLE);
    +        superUserRoles.add("superuser");
             conf.setSuperUserRoles(superUserRoles);
     
             Set<String> providers = new HashSet<>();
    @@ -107,7 +100,7 @@ protected void setup() throws Exception {
     
             // Set provider domain name
             Properties properties = new Properties();
    -        properties.setProperty("tokenPublicKey", TOKEN_TEST_PUBLIC_KEY);
    +        properties.setProperty("tokenPublicKey", server.getBase64EncodedPublicKey());
     
             conf.setProperties(properties);
     
    @@ -127,6 +120,7 @@ protected void setup() throws Exception {
         @Override
         protected void cleanup() throws Exception {
             super.internalCleanup();
    +        server.stop();
         }
     
         // setup both admin and pulsar client
    diff --git a/pulsar-testclient/src/test/resources/authentication/token/credentials_file.json b/pulsar-testclient/src/test/resources/authentication/token/credentials_file.json
    index 698ad9d93e3da..92ab1c3b7cd87 100644
    --- a/pulsar-testclient/src/test/resources/authentication/token/credentials_file.json
    +++ b/pulsar-testclient/src/test/resources/authentication/token/credentials_file.json
    @@ -1,5 +1,5 @@
     {
       "type": "client_credentials",
    -  "client_id":"Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x",
    -  "client_secret":"rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb"
    +  "client_id":"superuser",
    +  "client_secret":"a-client-secret"
     }
    diff --git a/pulsar-transaction/common/pom.xml b/pulsar-transaction/common/pom.xml
    index 2b1cf90a2cda7..1bc0dbccd64f3 100644
    --- a/pulsar-transaction/common/pom.xml
    +++ b/pulsar-transaction/common/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar
    -        pulsar-transaction-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-transaction-common
    -    Pulsar Transaction :: Common 
    -
    -    
    +
    +  4.0.0
    +  
    +    io.streamnative
    +    pulsar-transaction-parent
    +    3.0.0-SNAPSHOT
    +  
    +  pulsar-transaction-common
    +  Pulsar Transaction :: Common 
    +  
         
    -    
    -        
    -            
    -                org.gaul
    -                modernizer-maven-plugin
    -                
    -                    true
    -                    8
    -                
    -                
    -                    
    -                        modernizer
    -                        verify
    -                        
    -                            modernizer
    -                        
    -                    
    -                
    -            
    -            
    -                com.github.spotbugs
    -                spotbugs-maven-plugin
    -                
    -                    ${basedir}/src/main/resources/findbugsExclude.xml
    -                
    -            
    -        
    -    
    +  
    +    
    +      
    +        org.gaul
    +        modernizer-maven-plugin
    +        
    +          true
    +          8
    +        
    +        
    +          
    +            modernizer
    +            verify
    +            
    +              modernizer
    +            
    +          
    +        
    +      
    +      
    +        com.github.spotbugs
    +        spotbugs-maven-plugin
    +        
    +          ${basedir}/src/main/resources/findbugsExclude.xml
    +        
    +      
    +    
    +  
     
    diff --git a/pulsar-transaction/common/src/main/resources/findbugsExclude.xml b/pulsar-transaction/common/src/main/resources/findbugsExclude.xml
    index 07f4609cfff29..47c3d73af5f06 100644
    --- a/pulsar-transaction/common/src/main/resources/findbugsExclude.xml
    +++ b/pulsar-transaction/common/src/main/resources/findbugsExclude.xml
    @@ -1,3 +1,4 @@
    +
     
    diff --git a/pulsar-transaction/coordinator/pom.xml b/pulsar-transaction/coordinator/pom.xml
    --- a/pulsar-transaction/coordinator/pom.xml
    +++ b/pulsar-transaction/coordinator/pom.xml
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar
    -        pulsar-transaction-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-transaction-coordinator
    -    Pulsar Transaction :: Coordinator
    -
    -    
    -
    -        
    -            ${project.groupId}
    -            pulsar-common
    -            ${project.version}
    -        
    -
    -        
    -            ${project.groupId}
    -            managed-ledger
    -            ${project.version}
    -        
    -
    -        
    -            ${project.groupId}
    -            testmocks
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.awaitility
    -            awaitility
    -            test
    -        
    -
    -    
    -    
    -    
    -        
    -            
    -                org.gaul
    -                modernizer-maven-plugin
    -                
    -                    true
    -                    8
    -                
    -                
    -                    
    -                        modernizer
    -                        verify
    -                        
    -                            modernizer
    -                        
    -                    
    -                
    -            
    -            
    -                org.codehaus.mojo
    -                properties-maven-plugin
    -                
    -                    
    -                        initialize
    -                        
    -                            set-system-properties
    -                        
    -                        
    -                            
    -                                
    -                                    proto_path
    -                                    ${project.parent.parent.basedir}
    -                                
    -                                
    -                                    proto_search_strategy
    -                                    2
    -                                
    -                            
    -                        
    -                    
    -                
    -            
    -            
    -                com.github.splunk.lightproto
    -                lightproto-maven-plugin
    -                ${lightproto-maven-plugin.version}
    -                
    -                    
    -                        
    -                            generate
    -                        
    -                    
    -                
    -            
    -
    -            
    -                com.github.spotbugs
    -                spotbugs-maven-plugin
    -                
    -                    ${basedir}/src/main/resources/findbugsExclude.xml
    -                
    -            
    -        
    -    
    +
    +  4.0.0
    +  
    +    io.streamnative
    +    pulsar-transaction-parent
    +    3.0.0-SNAPSHOT
    +  
    +  pulsar-transaction-coordinator
    +  Pulsar Transaction :: Coordinator
    +  
    +    
    +      ${project.groupId}
    +      pulsar-common
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      managed-ledger
    +      ${project.version}
    +    
    +    
    +      ${project.groupId}
    +      testmocks
    +      ${project.version}
    +      test
    +    
    +    
    +      org.awaitility
    +      awaitility
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.gaul
    +        modernizer-maven-plugin
    +        
    +          true
    +          8
    +        
    +        
    +          
    +            modernizer
    +            verify
    +            
    +              modernizer
    +            
    +          
    +        
    +      
    +      
    +        org.codehaus.mojo
    +        properties-maven-plugin
    +        
    +          
    +            initialize
    +            
    +              set-system-properties
    +            
    +            
    +              
    +                
    +                  proto_path
    +                  ${project.parent.parent.basedir}
    +                
    +                
    +                  proto_search_strategy
    +                  2
    +                
    +              
    +            
    +          
    +        
    +      
    +      
    +        com.github.splunk.lightproto
    +        lightproto-maven-plugin
    +        ${lightproto-maven-plugin.version}
    +        
    +          
    +            
    +              generate
    +            
    +          
    +        
    +      
    +      
    +        com.github.spotbugs
    +        spotbugs-maven-plugin
    +        
    +          ${basedir}/src/main/resources/findbugsExclude.xml
    +        
    +      
    +    
    +  
     
    diff --git a/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml b/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml
    index a81fce11f4d21..f1cad02b28a88 100644
    --- a/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml
    +++ b/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml
    @@ -1,3 +1,4 @@
    +
     
    diff --git a/pulsar-transaction/pom.xml b/pulsar-transaction/pom.xml
    --- a/pulsar-transaction/pom.xml
    +++ b/pulsar-transaction/pom.xml
    -
    +
       4.0.0
       pom
       
    -    org.apache.pulsar
    +    io.streamnative
         pulsar
         3.0.0-SNAPSHOT
       
    -
       pulsar-transaction-parent
       Pulsar Transaction :: Parent
    -
       
         common
         coordinator
       
    -
       
    -
         
    -
           
             com.github.spotbugs
             spotbugs-maven-plugin
    @@ -68,7 +63,5 @@
             
           
         
    -
       
    -
     
    diff --git a/pulsar-websocket/pom.xml b/pulsar-websocket/pom.xml
    index 3f8f91d1416ba..c14a0d8356a8f 100644
    --- a/pulsar-websocket/pom.xml
    +++ b/pulsar-websocket/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       4.0.0
    -
       
    -    org.apache.pulsar
    +    io.streamnative
         pulsar
         3.0.0-SNAPSHOT
         ..
       
    -
       pulsar-websocket
       Pulsar WebSocket
    -
       
         
           ${project.groupId}
           pulsar-broker-common
           ${project.version}
         
    -
         
           ${project.groupId}
           pulsar-client-original
           ${project.version}
         
    -
         
           ${project.groupId}
           managed-ledger
           ${project.parent.version}
           test
         
    -
         
           org.apache.commons
           commons-lang3
         
    -
         
           org.glassfish.jersey.containers
           jersey-container-servlet-core
         
    -
         
           org.glassfish.jersey.containers
           jersey-container-servlet
         
    -
         
           org.glassfish.jersey.inject
           jersey-hk2
         
    -
         
           com.google.code.gson
           gson
         
    -
         
           io.swagger
           swagger-core
         
    -
         
           com.fasterxml.jackson.jaxrs
           jackson-jaxrs-json-provider
         
    -
    -		
    +    
         
           org.eclipse.jetty.websocket
           websocket-api
           ${jetty.version}
         
    -
    -		
    +    
         
           org.eclipse.jetty.websocket
           websocket-server
           ${jetty.version}
         
    -		
    +    
         
           org.eclipse.jetty.websocket
           javax-websocket-client-impl
    @@ -115,9 +101,7 @@
           org.hdrhistogram
           HdrHistogram
         
    -
       
    -
       
         
           
    @@ -137,7 +121,6 @@
               
             
           
    -
           
             com.github.spotbugs
             spotbugs-maven-plugin
    @@ -155,7 +138,6 @@
               
             
           
    -
           
             org.apache.maven.plugins
             maven-checkstyle-plugin
    diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java
    index 579b423339911..c988fd1e70ce3 100644
    --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java
    +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java
    @@ -38,6 +38,7 @@
     import org.apache.pulsar.client.api.ConsumerCryptoFailureAction;
     import org.apache.pulsar.client.api.DeadLetterPolicy;
     import org.apache.pulsar.client.api.MessageId;
    +import org.apache.pulsar.client.api.MessageIdAdv;
     import org.apache.pulsar.client.api.PulsarClient;
     import org.apache.pulsar.client.api.PulsarClientException;
     import org.apache.pulsar.client.api.PulsarClientException.AlreadyClosedException;
    @@ -45,6 +46,7 @@
     import org.apache.pulsar.client.api.SubscriptionType;
     import org.apache.pulsar.client.api.TopicMessageId;
     import org.apache.pulsar.client.impl.ConsumerBuilderImpl;
    +import org.apache.pulsar.client.impl.TopicMessageIdImpl;
     import org.apache.pulsar.common.util.Codec;
     import org.apache.pulsar.common.util.DateFormatter;
     import org.apache.pulsar.websocket.data.ConsumerCommand;
    @@ -293,8 +295,8 @@ private void checkResumeReceive() {
     
         private void handleAck(ConsumerCommand command) throws IOException {
             // We should have received an ack
    -        TopicMessageId msgId = TopicMessageId.create(topic.toString(),
    -                MessageId.fromByteArray(Base64.getDecoder().decode(command.messageId)));
    +        TopicMessageId msgId = new TopicMessageIdImpl(topic.toString(),
    +                (MessageIdAdv) MessageId.fromByteArray(Base64.getDecoder().decode(command.messageId)));
             if (log.isDebugEnabled()) {
                 log.debug("[{}/{}] Received ack request of message {} from {} ", consumer.getTopic(),
                         subscription, msgId, getRemote().getInetSocketAddress().toString());
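
    The hunk above swaps the TopicMessageId.create(...) factory for the TopicMessageIdImpl constructor that takes a MessageIdAdv. A condensed sketch of the same decode-and-ack path outside the handler (the helper name and the Consumer parameter are illustrative, not part of this patch):

    import java.util.Base64;
    import org.apache.pulsar.client.api.Consumer;
    import org.apache.pulsar.client.api.MessageId;
    import org.apache.pulsar.client.api.MessageIdAdv;
    import org.apache.pulsar.client.api.TopicMessageId;
    import org.apache.pulsar.client.impl.TopicMessageIdImpl;

    public final class AckDecodeSketch {
        // Rebuilds a topic-scoped message id from the base64 payload carried by a ConsumerCommand and acks it.
        static void ackEncodedMessageId(Consumer<byte[]> consumer, String topic, String base64MessageId)
                throws Exception {
            MessageId decoded = MessageId.fromByteArray(Base64.getDecoder().decode(base64MessageId));
            TopicMessageId msgId = new TopicMessageIdImpl(topic, (MessageIdAdv) decoded);
            consumer.acknowledge(msgId);
        }
    }
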
    diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/PingPongHandler.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/PingPongHandler.java
    deleted file mode 100644
    index 870630abc8889..0000000000000
    --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/PingPongHandler.java
    +++ /dev/null
    @@ -1,50 +0,0 @@
    -/*
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements.  See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership.  The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License.  You may obtain a copy of the License at
    - *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    - */
    -package org.apache.pulsar.websocket;
    -
    -import java.io.IOException;
    -import java.nio.ByteBuffer;
    -import org.eclipse.jetty.util.BufferUtil;
    -import org.eclipse.jetty.websocket.api.WebSocketAdapter;
    -import org.eclipse.jetty.websocket.api.WebSocketPingPongListener;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
    -
    -public class PingPongHandler extends WebSocketAdapter implements WebSocketPingPongListener {
    -
    -    private static final Logger log = LoggerFactory.getLogger(PingPongHandler.class);
    -
    -    @Override
    -    public void onWebSocketPing(ByteBuffer payload) {
    -        try {
    -            if (log.isDebugEnabled()) {
    -                log.debug("PING: {}", BufferUtil.toDetailString(payload));
    -            }
    -            getRemote().sendPong(payload);
    -        } catch (IOException e) {
    -            log.warn("Failed to send pong: {}", e.getMessage());
    -        }
    -    }
    -
    -    @Override
    -    public void onWebSocketPong(ByteBuffer payload) {
    -
    -    }
    -
    -}
    \ No newline at end of file
    diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketPingPongServlet.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketPingPongServlet.java
    deleted file mode 100644
    index cc2d79ee541ba..0000000000000
    --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketPingPongServlet.java
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -/*
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements.  See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership.  The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License.  You may obtain a copy of the License at
    - *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    - */
    -package org.apache.pulsar.websocket;
    -
    -import org.eclipse.jetty.websocket.servlet.WebSocketServlet;
    -import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
    -
    -public class WebSocketPingPongServlet extends WebSocketServlet {
    -    private static final long serialVersionUID = 1L;
    -
    -    public static final String SERVLET_PATH = "/ws/pingpong";
    -    public static final String SERVLET_PATH_V2 = "/ws/v2/pingpong";
    -
    -    private final transient WebSocketService service;
    -
    -    public WebSocketPingPongServlet(WebSocketService service) {
    -        this.service = service;
    -    }
    -
    -    @Override
    -    public void configure(WebSocketServletFactory factory) {
    -        factory.getPolicy().setMaxTextMessageSize(service.getConfig().getWebSocketMaxTextFrameSize());
    -        if (service.getConfig().getWebSocketSessionIdleTimeoutMillis() > 0) {
    -            factory.getPolicy().setIdleTimeout(service.getConfig().getWebSocketSessionIdleTimeoutMillis());
    -        }
    -        factory.setCreator((request, response) -> new PingPongHandler());
    -    }
    -}
    \ No newline at end of file
    diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketServiceStarter.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketServiceStarter.java
    index fbcecc0642e34..6231ef1a2aa41 100644
    --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketServiceStarter.java
    +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketServiceStarter.java
    @@ -29,7 +29,6 @@
     import org.apache.pulsar.common.util.CmdGenerateDocs;
     import org.apache.pulsar.common.util.ShutdownUtil;
     import org.apache.pulsar.websocket.WebSocketConsumerServlet;
    -import org.apache.pulsar.websocket.WebSocketPingPongServlet;
     import org.apache.pulsar.websocket.WebSocketProducerServlet;
     import org.apache.pulsar.websocket.WebSocketReaderServlet;
     import org.apache.pulsar.websocket.WebSocketService;
    @@ -91,7 +90,6 @@ public static void start(ProxyServer proxyServer, WebSocketService service) thro
             proxyServer.addWebSocketServlet(WebSocketProducerServlet.SERVLET_PATH, new WebSocketProducerServlet(service));
             proxyServer.addWebSocketServlet(WebSocketConsumerServlet.SERVLET_PATH, new WebSocketConsumerServlet(service));
             proxyServer.addWebSocketServlet(WebSocketReaderServlet.SERVLET_PATH, new WebSocketReaderServlet(service));
    -        proxyServer.addWebSocketServlet(WebSocketPingPongServlet.SERVLET_PATH, new WebSocketPingPongServlet(service));
     
             proxyServer.addWebSocketServlet(WebSocketProducerServlet.SERVLET_PATH_V2,
                     new WebSocketProducerServlet(service));
    @@ -99,8 +97,6 @@ public static void start(ProxyServer proxyServer, WebSocketService service) thro
                     new WebSocketConsumerServlet(service));
             proxyServer.addWebSocketServlet(WebSocketReaderServlet.SERVLET_PATH_V2,
                     new WebSocketReaderServlet(service));
    -        proxyServer.addWebSocketServlet(WebSocketPingPongServlet.SERVLET_PATH_V2,
    -                new WebSocketPingPongServlet(service));
     
             proxyServer.addRestResource(ADMIN_PATH_V1, ATTRIBUTE_PROXY_SERVICE_NAME, service, WebSocketProxyStatsV1.class);
             proxyServer.addRestResource(ADMIN_PATH_V2, ATTRIBUTE_PROXY_SERVICE_NAME, service, WebSocketProxyStatsV2.class);
    diff --git a/pulsar-websocket/src/main/resources/findbugsExclude.xml b/pulsar-websocket/src/main/resources/findbugsExclude.xml
    index b7f6b0bf31d51..26c9af667da61 100644
    --- a/pulsar-websocket/src/main/resources/findbugsExclude.xml
    +++ b/pulsar-websocket/src/main/resources/findbugsExclude.xml
    @@ -1,3 +1,4 @@
    +
     
       
         
    @@ -199,4 +199,4 @@
         
         
       
    -
    \ No newline at end of file
    +
    diff --git a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongHandlerTest.java b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongSupportTest.java
    similarity index 67%
    rename from pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongHandlerTest.java
    rename to pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongSupportTest.java
    index 662009f1aab1a..8119c2f1f8131 100644
    --- a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongHandlerTest.java
    +++ b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/PingPongSupportTest.java
    @@ -18,13 +18,16 @@
      */
     package org.apache.pulsar.websocket;
     
    +import java.io.IOException;
     import java.net.URI;
     import java.nio.ByteBuffer;
     import java.util.ArrayList;
     import java.util.List;
     import java.util.concurrent.ArrayBlockingQueue;
     import java.util.concurrent.Future;
    +import javax.servlet.http.HttpServletRequest;
     import org.apache.pulsar.broker.ServiceConfiguration;
    +import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
     import org.apache.pulsar.broker.web.WebExecutorThreadPool;
     import org.eclipse.jetty.client.HttpClient;
     import org.eclipse.jetty.server.Server;
    @@ -40,11 +43,18 @@
     import static org.mockito.Mockito.mock;
     import static org.mockito.Mockito.when;
     import static org.testng.Assert.assertTrue;
    +import org.eclipse.jetty.websocket.servlet.ServletUpgradeResponse;
    +import org.eclipse.jetty.websocket.servlet.WebSocketServlet;
    +import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
     import org.testng.annotations.AfterClass;
     import org.testng.annotations.BeforeClass;
    +import org.testng.annotations.DataProvider;
     import org.testng.annotations.Test;
     
    -public class PingPongHandlerTest {
    +/**
    + * Test to ensure {@link AbstractWebSocketHandler} has ping/pong support
    + */
    +public class PingPongSupportTest {
     
         private static Server server;
     
    @@ -67,9 +77,9 @@ public static void setup() throws Exception {
             when(config.getWebSocketMaxTextFrameSize()).thenReturn(1048576);
             when(config.getWebSocketSessionIdleTimeoutMillis()).thenReturn(300000);
     
    -        ServletHolder servletHolder = new ServletHolder("ws-events", new WebSocketPingPongServlet(service));
    +        ServletHolder servletHolder = new ServletHolder("ws-events", new GenericWebSocketServlet(service));
             ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
    -        context.setContextPath(WebSocketPingPongServlet.SERVLET_PATH);
    +        context.setContextPath("/ws");
             context.addServlet(servletHolder, "/*");
             server.setHandler(context);
             try {
    @@ -87,18 +97,60 @@ public static void tearDown() throws Exception {
             executor.stop();
         }
     
    -    @Test
    -    public void testPingPong() throws Exception {
    +    /**
    +     * We test these different endpoints because the AbstractWebSocketHandler parses them. We are not testing the
    +     * endpoint implementations themselves; ping/pong support is provided as part of the common framework.
    +     */
    +    @DataProvider(name = "endpoint")
    +    public static Object[][] endpoints() {
    +        return new Object[][] { { "producer" }, { "consumer" }, { "reader" } };
    +    }
    +
    +    @Test(dataProvider = "endpoint")
    +    public void testPingPong(String endpoint) throws Exception {
             HttpClient httpClient = new HttpClient();
             WebSocketClient webSocketClient = new WebSocketClient(httpClient);
             webSocketClient.start();
             MyWebSocket myWebSocket = new MyWebSocket();
    -        String webSocketUri = "ws://localhost:8080/ws/pingpong";
    +        String webSocketUri = "ws://localhost:8080/ws/v2/" + endpoint + "/persistent/my-property/my-ns/my-topic";
             Future<Session> sessionFuture = webSocketClient.connect(myWebSocket, URI.create(webSocketUri));
             sessionFuture.get().getRemote().sendPing(ByteBuffer.wrap("test".getBytes()));
             assertTrue(myWebSocket.getResponse().contains("test"));
         }
     
    +    public static class GenericWebSocketHandler extends AbstractWebSocketHandler {
    +
    +        public GenericWebSocketHandler(WebSocketService service, HttpServletRequest request, ServletUpgradeResponse response) {
    +            super(service, request, response);
    +        }
    +
    +        @Override
    +        protected Boolean isAuthorized(String authRole, AuthenticationDataSource authenticationData) throws Exception {
    +            return true;
    +        }
    +
    +        @Override
    +        public void close() throws IOException {
    +
    +        }
    +    }
    +
    +    public static class GenericWebSocketServlet extends WebSocketServlet {
    +
    +        private static final long serialVersionUID = 1L;
    +        private final WebSocketService service;
    +
    +        public GenericWebSocketServlet(WebSocketService service) {
    +            this.service = service;
    +        }
    +
    +        @Override
    +        public void configure(WebSocketServletFactory factory) {
    +            factory.setCreator((request, response) ->
    +                    new GenericWebSocketHandler(service, request.getHttpServletRequest(), response));
    +        }
    +    }
    +
         @WebSocket
         public static class MyWebSocket extends WebSocketAdapter implements WebSocketPingPongListener {
     
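
    Since ping/pong is now handled by AbstractWebSocketHandler itself rather than a dedicated servlet, any producer/consumer/reader endpoint answers pings. A standalone Jetty-client sketch of what the test exercises (the endpoint URL, topic and timeouts are illustrative):

    import java.net.URI;
    import java.nio.ByteBuffer;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.eclipse.jetty.websocket.api.Session;
    import org.eclipse.jetty.websocket.api.WebSocketAdapter;
    import org.eclipse.jetty.websocket.api.WebSocketPingPongListener;
    import org.eclipse.jetty.websocket.client.WebSocketClient;

    public class PingPongClientSketch {
        // Minimal listener that only waits for the pong echoed back by the handler.
        static class PongListener extends WebSocketAdapter implements WebSocketPingPongListener {
            final CountDownLatch pongReceived = new CountDownLatch(1);

            @Override
            public void onWebSocketPing(ByteBuffer payload) {
                // not used; the server side answers pings
            }

            @Override
            public void onWebSocketPong(ByteBuffer payload) {
                pongReceived.countDown();
            }
        }

        public static void main(String[] args) throws Exception {
            WebSocketClient client = new WebSocketClient();
            client.start();
            try {
                PongListener listener = new PongListener();
                // Illustrative endpoint; the test connects to its embedded Jetty server on port 8080.
                URI uri = URI.create("ws://localhost:8080/ws/v2/reader/persistent/my-property/my-ns/my-topic");
                Session session = client.connect(listener, uri).get(10, TimeUnit.SECONDS);
                session.getRemote().sendPing(ByteBuffer.wrap("test".getBytes()));
                listener.pongReceived.await(10, TimeUnit.SECONDS);
                session.close();
            } finally {
                client.stop();
            }
        }
    }
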
    diff --git a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/WebSocketProxyConfigurationTest.java b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/WebSocketProxyConfigurationTest.java
    index 92b4238cddd7d..a5ec63045bad3 100644
    --- a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/WebSocketProxyConfigurationTest.java
    +++ b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/WebSocketProxyConfigurationTest.java
    @@ -34,6 +34,7 @@
     import java.io.PrintWriter;
     
     import static org.testng.Assert.assertEquals;
    +import static org.testng.Assert.assertNull;
     
     public class WebSocketProxyConfigurationTest {
     
    @@ -64,6 +65,7 @@ public void testBackwardCompatibility() throws IOException {
                 printWriter.println("metadataStoreCacheExpirySeconds=500");
                 printWriter.println("zooKeeperSessionTimeoutMillis=-1");
                 printWriter.println("zooKeeperCacheExpirySeconds=-1");
    +            printWriter.println("cryptoKeyReaderFactoryClassName=");
             }
             testConfigFile.deleteOnExit();
             stream = new FileInputStream(testConfigFile);
    @@ -71,6 +73,7 @@ public void testBackwardCompatibility() throws IOException {
             stream.close();
             assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 60);
             assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 500);
    +        assertNull(serviceConfig.getCryptoKeyReaderFactoryClassName());
     
             testConfigFile = new File("tmp." + System.currentTimeMillis() + ".properties");
             if (testConfigFile.exists()) {
    @@ -81,6 +84,7 @@ public void testBackwardCompatibility() throws IOException {
                 printWriter.println("metadataStoreCacheExpirySeconds=30");
                 printWriter.println("zooKeeperSessionTimeoutMillis=100");
                 printWriter.println("zooKeeperCacheExpirySeconds=300");
    +            printWriter.println("cryptoKeyReaderFactoryClassName=A.class");
             }
             testConfigFile.deleteOnExit();
             stream = new FileInputStream(testConfigFile);
    @@ -88,6 +92,7 @@ public void testBackwardCompatibility() throws IOException {
             stream.close();
             assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 100);
             assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 300);
    +        assertEquals(serviceConfig.getCryptoKeyReaderFactoryClassName(), "A.class");
         }
     
         @Test
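
    For reference, a condensed sketch of the loading path this test drives, assuming the proxy configuration is read through PulsarConfigurationLoader the same way the WebSocket service starter does (the property values and the A.class name are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.pulsar.common.configuration.PulsarConfigurationLoader;
    import org.apache.pulsar.websocket.service.WebSocketProxyConfiguration;

    public class ProxyConfigLoadSketch {
        public static void main(String[] args) throws Exception {
            // Legacy ZooKeeper keys together with the new optional crypto factory key.
            String props = "zooKeeperSessionTimeoutMillis=100\n"
                    + "zooKeeperCacheExpirySeconds=300\n"
                    + "cryptoKeyReaderFactoryClassName=A.class\n";
            try (InputStream in = new ByteArrayInputStream(props.getBytes(StandardCharsets.UTF_8))) {
                WebSocketProxyConfiguration conf =
                        PulsarConfigurationLoader.create(in, WebSocketProxyConfiguration.class);
                System.out.println(conf.getCryptoKeyReaderFactoryClassName()); // A.class
            }
        }
    }
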
    diff --git a/src/assembly-source-package.xml b/src/assembly-source-package.xml
    index 00f00bfe3a5c7..bae52588edcf6 100644
    --- a/src/assembly-source-package.xml
    +++ b/src/assembly-source-package.xml
    @@ -31,7 +31,7 @@
         
         
           .
    -      
    +      
           true
           
             
    @@ -39,14 +39,11 @@
             src/*.py
             docker/pulsar/scripts/*.sh
             docker/pulsar/scripts/*.py
    -
             
             data/**
             logs/**
    -
             
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/).*${project.build.directory}.*]
    -
             
    -
             
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?maven-eclipse\.xml]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.project]
    @@ -72,12 +68,10 @@
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.externalToolBuilders(/.*)?]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.deployables(/.*)?]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.wtpmodules(/.*)?]
    -
             
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?cobertura\.ser]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?pom\.xml\.versionsBackup]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?dependency-reduced-pom\.xml]
    -
             
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?pom\.xml\.releaseBackup]
             %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?release\.properties]
    @@ -86,7 +80,7 @@
         
         
           ${project.build.directory}/maven-shared-archive-resources/META-INF
    -      
    +      
         
         
           src
    diff --git a/src/check-binary-license.sh b/src/check-binary-license.sh
    index 3a6d266345f30..9c6a4d3223a70 100755
    --- a/src/check-binary-license.sh
    +++ b/src/check-binary-license.sh
    @@ -61,7 +61,7 @@ EXIT=0
     
     # Check all bundled jars are mentioned in LICENSE
     for J in $JARS; do
    -    echo $J | grep -q "org.apache.pulsar"
    +    echo $J | grep -q "io.streamnative"
         if [ $? == 0 ]; then
             continue
         fi
    @@ -104,7 +104,7 @@ if [ "$NO_PRESTO" -ne 1 ]; then
     
     
         for J in $JARS; do
    -        echo $J | grep -q "org.apache.pulsar"
    +        echo $J | grep -q "io.streamnative"
             if [ $? == 0 ]; then
           continue
             fi
    diff --git a/src/findbugs-exclude.xml b/src/findbugs-exclude.xml
    index 8f289b83a7be1..de4d3f7a93371 100644
    --- a/src/findbugs-exclude.xml
    +++ b/src/findbugs-exclude.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -	
    -		
    -			
    -
    -			
    -			
    -			
    -                        
    -			
    -			
    -		
    -	
    +
    +  
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/src/idea-code-style.xml b/src/idea-code-style.xml
    index 810dee34a3494..b49c523c322b2 100644
    --- a/src/idea-code-style.xml
    +++ b/src/idea-code-style.xml
    @@ -1,3 +1,4 @@
    +
     
     
       
    -    
         
    -    
       
    -    
       
         
    -      
       
    -
    \ No newline at end of file
    +
    diff --git a/src/owasp-dependency-check-false-positives.xml b/src/owasp-dependency-check-false-positives.xml
    index 345be8f4d2c06..151752c98f7a1 100644
    --- a/src/owasp-dependency-check-false-positives.xml
    +++ b/src/owasp-dependency-check-false-positives.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    
    +    
    +    
        file name: zookeeper-3.8.0.jar
    -   ]]>
    +   
         e395c1d8a71557b7569cc6a83487b2e30e2e58fe
         CVE-2021-28164
       
       
    -    
        file name: zookeeper-3.8.0.jar
    -   ]]>
    +   
         e395c1d8a71557b7569cc6a83487b2e30e2e58fe
         CVE-2021-29425
       
       
    -    
        file name: zookeeper-3.8.0.jar
    -   ]]>
    +   
         e395c1d8a71557b7569cc6a83487b2e30e2e58fe
         CVE-2021-34429
       
       
    -    
        file name: zookeeper-prometheus-metrics-3.8.0.jar
    -   ]]>
    +   
         849e8ece2845cb0185d721233906d487a7f1e4cf
         CVE-2021-28164
       
       
    -    
        file name: zookeeper-prometheus-metrics-3.8.0.jar
    -   ]]>
    +   
         849e8ece2845cb0185d721233906d487a7f1e4cf
         CVE-2021-29425
       
       
    -    
        file name: zookeeper-prometheus-metrics-3.8.0.jar
    -   ]]>
    +   
         849e8ece2845cb0185d721233906d487a7f1e4cf
         CVE-2021-34429
       
       
    -    
        file name: zookeeper-jute-3.8.0.jar
    -   ]]>
    +   
         6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5
         CVE-2021-28164
       
       
    -    
        file name: zookeeper-jute-3.8.0.jar
    -   ]]>
    +   
         6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5
         CVE-2021-29425
       
       
    -    
        file name: zookeeper-jute-3.8.0.jar
    -   ]]>
    +   
         6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5
         CVE-2021-34429
       
    -
       
       
    -    
        file name: debezium-connector-postgres-1.7.2.Final.jar
    -   ]]>
    +   
         69c1edfa7d89531af511fcd07e8516fa450f746a
         CVE-2021-23214
       
    -
    -
    -
    +  
       
    -    
        file name: mariadb-java-client-2.7.5.jar
    -   ]]>
    +   
         9dd29797ecabe7d2e7fa892ec6713a5552cfcc59
         CVE-2022-27376
         CVE-2022-27377
    @@ -162,43 +159,36 @@
         CVE-2022-27386
         CVE-2022-27387
       
    -
       
       
    -    
        file name: google-http-client-gson-1.41.0.jar
    -   ]]>
    +   
         1a754a5dd672218a2ac667d7ff2b28df7a5a240e
         CVE-2022-25647
       
    -
       
         commons-net is not used at all and therefore commons-net vulnerability CVE-2021-37533 is a false positive.
         CVE-2021-37533
       
    -
       
         fredsmith utils library is not used at all. CVE-2021-4277 is a false positive.
         CVE-2021-4277
       
    -
       
         It treat pulsar-io-kafka-connect-adaptor as a lib of Kafka, CVE-2021-25194 is a false positive.
         CVE-2023-25194
       
    -
       
         It treat pulsar-io-kafka-connect-adaptor as a lib of Kafka, CVE-2021-34917 is a false positive.
         CVE-2022-34917
       
    -
       
         yaml_project is not used at all. Any CVEs reported for yaml_project are false positives.
         cpe:/a:yaml_project:yaml
       
    -
       
         flat_project is not used at all.
         cpe:/a:flat_project:flat
       
    -
    \ No newline at end of file
    +
    diff --git a/src/owasp-dependency-check-suppressions.xml b/src/owasp-dependency-check-suppressions.xml
    index 84ed2a3332cd4..e000f056b5e1f 100644
    --- a/src/owasp-dependency-check-suppressions.xml
    +++ b/src/owasp-dependency-check-suppressions.xml
    @@ -1,4 +1,4 @@
    -
    +
     
     
    -    
    -    
    -        Ignore netty CVEs in GRPC shaded Netty.
    -        .*grpc-netty-shaded.*
    -        cpe:/a:netty:netty
    -    
    -    
    -        Suppress all pulsar-presto-distribution vulnerabilities
    -        .*pulsar-presto-distribution-.*
    -        .*
    -    
    -    
    -        Suppress libthrift-0.12.0.jar vulnerabilities
    -        org.apache.thrift:libthrift:0.12.0
    -        .*
    -    
    -
    -    
    -        
    -        e80612549feb5c9191c498de628c1aa80693cf0b
    -        CVE-2022-1471
    -    
    -
    -    
    -    
    -        
    +  
    +    Ignore netty CVEs in GRPC shaded Netty.
    +    .*grpc-netty-shaded.*
    +    cpe:/a:netty:netty
    +  
    +  
    +    Suppress all pulsar-presto-distribution vulnerabilities
    +    .*pulsar-presto-distribution-.*
    +    .*
    +  
    +  
    +    Suppress libthrift-0.12.0.jar vulnerabilities
    +    org.apache.thrift:libthrift:0.12.0
    +    .*
    +  
    +  
    +  
    +    
            file name: msgpack-core-0.9.0.jar
    -       ]]>
    -        87d9ce0b22de48428fa32bb8ad476e18b6969548
    -        CVE-2022-41719
    -    
    -
    -    
    -    
    -        
    +    87d9ce0b22de48428fa32bb8ad476e18b6969548
    +    CVE-2022-41719
    +  
    +  
    +  
    +    
         file name: elasticsearch-java-8.1.0.jar
         CVE-2022-23712 is only related to Elastic server.
    -    ]]>
    -        edf5be04cbc2eafc51540ba33f9536e788b9d60b
    -        CVE-2022-23712
    -    
    -    
    -        
    +    edf5be04cbc2eafc51540ba33f9536e788b9d60b
    +    CVE-2022-23712
    +  
    +  
    +    
         file name: elasticsearch-rest-client-8.1.0.jar
         CVE-2022-23712 is only related to Elastic server.
    -    ]]>
    -        10e7aa09f10955a074c0a574cb699344d2745df1
    -        CVE-2022-23712
    -    
    -
    -    
    -    
    -        
    -        ef50bfa2c0491a11dcc35d9822edbfd6170e1ea2
    -        cpe:/a:jetbrains:kotlin
    -    
    -    
    -        
    -        3546900a3ebff0c43f31190baf87a9220e37b7ea
    -        CVE-2022-24329
    -    
    -    
    -        
    -        3302f9ec8a5c1ed220781dbd37770072549bd333
    -        CVE-2022-24329
    -    
    -    
    -        
    -        461367948840adbb0839c51d91ed74ef4a9ccb52
    -        CVE-2022-24329
    -    
    -
    -    
    -    
    -        
    +    10e7aa09f10955a074c0a574cb699344d2745df1
    +    CVE-2022-23712
    +  
    +  
    +  
    +    
            file name: canal.client-1.1.5.jar (shaded: com.google.guava:guava:22.0)
    -       ]]>
    -        b87878db57d5cfc2ca7d3972cc8f7486bf02fbca
    -        CVE-2018-10237
    -    
    -    
    -        
    +    b87878db57d5cfc2ca7d3972cc8f7486bf02fbca
    +    CVE-2018-10237
    +  
    +  
    +    
            file name: canal.client-1.1.5.jar (shaded: com.google.guava:guava:22.0)
    -       ]]>
    -        b87878db57d5cfc2ca7d3972cc8f7486bf02fbca
    -        CVE-2020-8908
    -    
    -
    -    
    -    
    -        
    +    b87878db57d5cfc2ca7d3972cc8f7486bf02fbca
    +    CVE-2020-8908
    +  
    +  
    +  
    +    
         file name: avro-1.10.2.jar
    -    ]]>
    -        a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2
    -        CVE-2021-43045
    -    
    -    
    -        
    +    a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2
    +    CVE-2021-43045
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2018-14668
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2018-14668
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2018-14669
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2018-14669
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2018-14670
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2018-14670
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2018-14671
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2018-14671
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2018-14672
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2018-14672
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2019-15024
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2019-15024
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2019-16535
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2019-16535
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2019-18657
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2019-18657
    +  
    +  
    +    
         file name: clickhouse-jdbc-0.3.2.jar
    -    ]]>
    -        fa9a1ccda7d78edb51a3a33d3493566092786a30
    -        CVE-2021-25263
    -    
    -    
    -        
    +    fa9a1ccda7d78edb51a3a33d3493566092786a30
    +    CVE-2021-25263
    +  
    +  
    +    
          file name: logback-core-1.1.3.jar
    -     ]]>
    -        e3c02049f2dbbc764681b40094ecf0dcbc99b157
    -        cpe:/a:qos:logback
    -    
    -    
    -        
    +    e3c02049f2dbbc764681b40094ecf0dcbc99b157
    +    cpe:/a:qos:logback
    +  
    +  
    +    
          file name: rocketmq-acl-4.5.2.jar
    -     ]]>
    -        0e2bd9c162280cd79c2ea0f67f174ee5d7b84ddd
    -        cpe:/a:apache:rocketmq
    -    
    -    
    -        
    -        ^pkg:maven/org\.springframework/spring.*$
    -        CVE-2016-1000027
    -    
    -    
    -        
    +    0e2bd9c162280cd79c2ea0f67f174ee5d7b84ddd
    +    cpe:/a:apache:rocketmq
    +  
    +  
    +    Ignored since we are not vulnerable
    +    ^pkg:maven/org\.springframework/spring.*$
    +    CVE-2016-1000027
    +  
    +  
    +    
          file name: logback-classic-1.1.3.jar
    -     ]]>
    -        d90276fff414f06cb375f2057f6778cd63c6082f
    -        cpe:/a:qos:logback
    -    
    -    
    -        
    +    d90276fff414f06cb375f2057f6778cd63c6082f
    +    cpe:/a:qos:logback
    +  
    +  
    +    
          file name: logback-core-1.1.3.jar
    -     ]]>
    -        e3c02049f2dbbc764681b40094ecf0dcbc99b157
    -        CVE-2017-5929
    -    
    -    
    -        
    +    e3c02049f2dbbc764681b40094ecf0dcbc99b157
    +    CVE-2017-5929
    +  
    +  
    +    
          file name: logback-classic-1.1.3.jar
    -     ]]>
    -        d90276fff414f06cb375f2057f6778cd63c6082f
    -        CVE-2017-5929
    -    
    -    
    -        
    +    d90276fff414f06cb375f2057f6778cd63c6082f
    +    CVE-2017-5929
    +  
    +  
    +    
          file name: logback-classic-1.1.3.jar
    -     ]]>
    -        d90276fff414f06cb375f2057f6778cd63c6082f
    -        CVE-2021-42550
    -    
    -
    -    
    -    
    -        
    -        c85851ca3ea8128d480d3f75c568a37e64e8a77b
    -        CVE-2020-15106
    -    
    -    
    -        
    -        c85851ca3ea8128d480d3f75c568a37e64e8a77b
    -        CVE-2020-15112
    -    
    -    
    -        
    -        c85851ca3ea8128d480d3f75c568a37e64e8a77b
    -        CVE-2020-15113
    -    
    -
    -    
    -        
    -        6dac6efe035a2be9ba299fbf31be5f903401869f
    -        CVE-2020-15106
    -    
    -    
    -        
    -        6dac6efe035a2be9ba299fbf31be5f903401869f
    -        CVE-2020-15112
    -    
    -    
    -        
    -        6dac6efe035a2be9ba299fbf31be5f903401869f
    -        CVE-2020-15113
    -    
    -
    -    
    -    
    -        
    +    d90276fff414f06cb375f2057f6778cd63c6082f
    +    CVE-2021-42550
    +  
    +  
    +    Ignore etdc CVEs in jetcd
    +    .*jetcd.*
    +    cpe:/a:etcd:etcd
    +  
    +  
    +    Ignore etdc CVEs in jetcd
    +    .*jetcd.*
    +    cpe:/a:redhat:etcd
    +  
    +  
    +    Ignore grpc CVEs in jetcd
    +    .*jetcd-grpc.*
    +    cpe:/a:grpc:grpc
    +  
    +  
    +  
    +    
            file name: bc-fips-1.0.2.jar
    -       ]]>
    -        4fb5db5f03d00f6a94e43b78d097978190e4abb2
    -        CVE-2020-26939
    -    
    -    
    -        
    +    4fb5db5f03d00f6a94e43b78d097978190e4abb2
    +    CVE-2020-26939
    +  
    +  
    +    
            file name: bcpkix-fips-1.0.2.jar
    -       ]]>
    -        543bc7a08cdba0172e95e536b5f7ca61f021253d
    -        CVE-2020-15522
    -    
    -    
    -        
    +    543bc7a08cdba0172e95e536b5f7ca61f021253d
    +    CVE-2020-15522
    +  
    +  
    +    
            file name: bcpkix-fips-1.0.2.jar
    -       ]]>
    -        543bc7a08cdba0172e95e536b5f7ca61f021253d
    -        CVE-2020-26939
    -    
    -
    -    
    -    
    -        
    +    543bc7a08cdba0172e95e536b5f7ca61f021253d
    +    CVE-2020-26939
    +  
    +  
    +  
    +    
            file name: openstack-swift-2.5.0.jar
    -       ]]>
    -        d99d0eab2e01d69d8a326fc152427fbd759af88a
    -        CVE-2016-0738
    -    
    -    
    -        
    +    d99d0eab2e01d69d8a326fc152427fbd759af88a
    +    CVE-2016-0738
    +  
    +  
    +    
            file name: openstack-swift-2.5.0.jar
    -       ]]>
    -        d99d0eab2e01d69d8a326fc152427fbd759af88a
    -        CVE-2017-16613
    -    
    -    
    -        
    +    d99d0eab2e01d69d8a326fc152427fbd759af88a
    +    CVE-2017-16613
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2018-14432
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2018-14432
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2018-20170
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2018-20170
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2020-12689
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2020-12689
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2020-12690
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2020-12690
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2020-12691
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2020-12691
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2020-12692
    -    
    -    
    -        
    +    a7e89bd278fa8be9fa604dda66d1606de5530797
    +    CVE-2020-12692
    +  
    +  
    +    
            file name: openstack-keystone-2.5.0.jar
    -       ]]>
    -        a7e89bd278fa8be9fa604dda66d1606de5530797
    -        CVE-2021-3563
    -    
    -
    -    
    -    
    -        
    +    
            file name: org.apache.pulsar:pulsar-io-solr:2.10.0-SNAPSHOT
    -       ]]>
    -        ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$
    -        cpe:/a:apache:pulsar
    -    
    -    
    -        
    +    ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$
    +    cpe:/a:apache:pulsar
    +  
    +  
    +    
            file name: org.apache.pulsar:pulsar-io-solr:2.10.0-SNAPSHOT
    -       ]]>
    -        ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$
    -        cpe:/a:apache:solr
    -    
    -
    -    
    -    
    -        
    +    ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$
    +    cpe:/a:apache:solr
    +  
    +  
    +  
    +    
            file name: debezium-connector-mysql-1.9.7.Final.jar
    -       ]]>
    -        74c623b4a9b231e2f0e8f6053b38abd3bc487ce2
    -        CVE-2017-15945
    -    
    -    
    -        
    +    74c623b4a9b231e2f0e8f6053b38abd3bc487ce2
    +    CVE-2017-15945
    +  
    +  
    +    
            file name: mysql-binlog-connector-java-0.27.2.jar
    -       ]]>
    -        23294cd730e29c00b8ddfbde517dfc955aba090f
    -        CVE-2017-15945
    -    
    -    
    -        
    +    23294cd730e29c00b8ddfbde517dfc955aba090f
    +    CVE-2017-15945
    +  
    +  
    +    
            file name: debezium-connector-postgres-1.9.7.Final.jar
    -       ]]>
    -        300ff0bbf795643e914b7c8a6d6ba812a8354d62
    -        CVE-2015-0241
    -        CVE-2015-0242
    -        CVE-2015-0243
    -        CVE-2015-0244
    -        CVE-2015-3166
    -        CVE-2015-3167
    -        CVE-2016-0766
    -        CVE-2016-0768
    -        CVE-2016-0773
    -        CVE-2016-5423
    -        CVE-2016-5424
    -        CVE-2016-7048
    -        CVE-2017-14798
    -        CVE-2017-7484
    -        CVE-2018-1115
    -        CVE-2019-10127
    -        CVE-2019-10128
    -        CVE-2019-10210
    -        CVE-2019-10211
    -        CVE-2020-25694
    -        CVE-2020-25695
    -        CVE-2021-23214
    -    
    -    
    -        
    +    300ff0bbf795643e914b7c8a6d6ba812a8354d62
    +    CVE-2015-0241
    +    CVE-2015-0242
    +    CVE-2015-0243
    +    CVE-2015-0244
    +    CVE-2015-3166
    +    CVE-2015-3167
    +    CVE-2016-0766
    +    CVE-2016-0768
    +    CVE-2016-0773
    +    CVE-2016-5423
    +    CVE-2016-5424
    +    CVE-2016-7048
    +    CVE-2017-14798
    +    CVE-2017-7484
    +    CVE-2018-1115
    +    CVE-2019-10127
    +    CVE-2019-10128
    +    CVE-2019-10210
    +    CVE-2019-10211
    +    CVE-2020-25694
    +    CVE-2020-25695
    +    CVE-2021-23214
    +  
    +  
    +    
            file name: protostream-types-4.4.1.Final.jar
    -       ]]>
    -        29b45ebea1e4ce62ab3ec5eb76fa9771f98941b0
    -        CVE-2016-0750
    -        CVE-2017-15089
    -        CVE-2017-2638
    -        CVE-2019-10158
    -        CVE-2019-10174
    -        CVE-2020-25711
    -    
    -    
    -        
    +    29b45ebea1e4ce62ab3ec5eb76fa9771f98941b0
    +    CVE-2016-0750
    +    CVE-2017-15089
    +    CVE-2017-2638
    +    CVE-2019-10158
    +    CVE-2019-10174
    +    CVE-2020-25711
    +  
    +  
    +    
            file name: mariadb-java-client-2.7.5.jar
    -       ]]>
    -        9dd29797ecabe7d2e7fa892ec6713a5552cfcc59
    -        CVE-2020-28912
    -        CVE-2021-46669
    -        CVE-2021-46666
    -        CVE-2021-46667
    -    
    -    
    -    
    -        
    +    9dd29797ecabe7d2e7fa892ec6713a5552cfcc59
    +    CVE-2020-28912
    +    CVE-2021-46669
    +    CVE-2021-46666
    +    CVE-2021-46667
    +  
    +  
    +  
    +    
            file name: cassandra-driver-core-3.11.2.jar
    -       ]]>
    -        e0aad9f8611e710b9a0ce49747f7465ce07d8404
    -        CVE-2020-17516
    -        CVE-2021-44521
    -    
    -    
    -        
    +    e0aad9f8611e710b9a0ce49747f7465ce07d8404
    +    CVE-2020-17516
    +    CVE-2021-44521
    +  
    +  
    +    
            The vulnerable method is deprecated in Guava, but isn't removed. It's necessary to suppress this CVE.
            See https://github.com/google/guava/issues/4011
    -       ]]>
    -        CVE-2020-8908
    -    
    -
    +       
    +    CVE-2020-8908
    +  
    +  
    +    
    +   This is a false positive in avro-protobuf. The vulnerability is in Hamba avro golang library.
    +   
    +    CVE-2023-37475
    +  
    +  
    +    
    +    This CVE can be suppressed since it is covered in Pulsar by hostname verification changes made in https://github.com/apache/pulsar/pull/15824.
    +   
    +    CVE-2023-4586
    +  
     
    diff --git a/src/settings.xml b/src/settings.xml
    index 80ec2f40620e7..8fc2f13a37a59 100644
    --- a/src/settings.xml
    +++ b/src/settings.xml
    @@ -1,3 +1,4 @@
    +
     
         
           apache.releases.https
    @@ -34,7 +34,6 @@
           ${env.APACHE_PASSWORD}
         
       
    -
       
         
           apache
    diff --git a/structured-event-log/pom.xml b/structured-event-log/pom.xml
    index 8c058306fc278..2fbed8f2dd793 100644
    --- a/structured-event-log/pom.xml
    +++ b/structured-event-log/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       4.0.0
    -
       
    -    org.apache.pulsar
    +    io.streamnative
         pulsar
         3.0.0-SNAPSHOT
         ..
       
    -
       structured-event-log
       Structured event logger
    -
       
         
           org.slf4j
           slf4j-api
         
    -
         
         
           org.apache.logging.log4j
    @@ -60,7 +56,6 @@
           test
         
       
    -
       
         
           
    @@ -78,5 +73,4 @@
           
         
       
    -
     
    diff --git a/testmocks/pom.xml b/testmocks/pom.xml
    index 1cdcd7906079f..6333b5a265262 100644
    --- a/testmocks/pom.xml
    +++ b/testmocks/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       4.0.0
    -
       
         pulsar
    -    org.apache.pulsar
    +    io.streamnative
         3.0.0-SNAPSHOT
       
    -
       testmocks
       jar
       Pulsar Test Mocks
    -
       
    -
         
           org.apache.zookeeper
           zookeeper
    @@ -52,29 +48,23 @@
             
           
         
    -
         
           org.apache.bookkeeper
           bookkeeper-server
         
    -
         
           org.apache.commons
           commons-lang3
         
    -
         
           org.testng
           testng
         
    -
         
           org.objenesis
           objenesis
         
    -
       
    -
       
         
           
    @@ -92,5 +82,4 @@
           
         
       
    -
     
    diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java
    index d023427e3be31..dd33c2c4532bf 100644
    --- a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java
    +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java
    @@ -52,7 +52,6 @@ public BookKeeperTestClient(ClientConfiguration conf, ZooKeeper zkc)
                 throws IOException, InterruptedException, BKException {
             super(conf, zkc, null, new UnpooledByteBufAllocator(false),
                     NullStatsLogger.INSTANCE, null, null, null);
    -        this.statsProvider = statsProvider;
         }
     
         public BookKeeperTestClient(ClientConfiguration conf)
    diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java
    index f0d279ef25050..998ef73fbd3e9 100644
    --- a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java
    +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java
    @@ -18,6 +18,7 @@
      */
     package org.apache.bookkeeper.client;
     
    +import static com.google.common.base.Preconditions.checkArgument;
     import com.google.common.collect.Lists;
     import java.util.ArrayList;
     import java.util.Arrays;
    @@ -364,6 +365,7 @@ public synchronized CompletableFuture<Void> promiseAfter(int steps, List<Complet
     
         public void addEntryDelay(long delay, TimeUnit unit) {
    +        checkArgument(delay >= 0, "The delay time must not be negative.");
             addEntryDelaysMillis.add(unit.toMillis(delay));
         }
     
    diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java b/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java
    index 4d08a7f80df5b..cf08cc635106e 100644
    --- a/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java
    +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java
    @@ -18,6 +18,7 @@
      */
     package org.apache.bookkeeper.client;
     
    +import static com.google.common.base.Preconditions.checkArgument;
     import java.util.Map;
     import java.util.concurrent.ConcurrentHashMap;
     import java.util.concurrent.TimeUnit;
    @@ -65,6 +66,7 @@ public void addCount(long delta) {
     
             @Override
             public void addLatency(long eventLatency, TimeUnit unit) {
    +            checkArgument(eventLatency >= 0, "The event latency must not be negative.");
                 long valueMillis = unit.toMillis(eventLatency);
                 updateMax(val.addAndGet(valueMillis));
             }
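
    The guard added above is Guava's Preconditions.checkArgument; a tiny standalone illustration of the failure mode it now rejects (the class name and values are illustrative):

    import static com.google.common.base.Preconditions.checkArgument;
    import java.util.concurrent.TimeUnit;

    public class LatencyGuardSketch {
        // Mirrors the new validation: a negative latency now fails fast instead of skewing the recorded maximum.
        static long toMillisChecked(long eventLatency, TimeUnit unit) {
            checkArgument(eventLatency >= 0, "The event latency must not be negative.");
            return unit.toMillis(eventLatency);
        }

        public static void main(String[] args) {
            System.out.println(toMillisChecked(250, TimeUnit.MICROSECONDS)); // prints 0 (rounds down to millis)
            // toMillisChecked(-1, TimeUnit.MILLISECONDS) would throw IllegalArgumentException
        }
    }
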
    diff --git a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java
    index 0c0f7ec9ed1d4..f32036e53f001 100644
    --- a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java
    +++ b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java
    @@ -1114,7 +1114,7 @@ Optional<KeeperException.Code> programmedFailure(Op op, String path) {
             Optional<Failure> failure = failures.stream().filter(f -> f.predicate.test(op, path)).findFirst();
             if (failure.isPresent()) {
                 failures.remove(failure.get());
    -            return Optional.of(failure.get().failReturnCode);
    +            return Optional.ofNullable(failure.get().failReturnCode);
             } else {
                 return Optional.empty();
             }
    @@ -1131,6 +1131,18 @@ public void failConditional(KeeperException.Code rc, BiPredicate<Op, String> pre
             failures.add(new Failure(rc, predicate));
         }
     
    +    public void delay(long millis, BiPredicate<Op, String> predicate) {
    +        failures.add(new Failure(null, (op, s) -> {
    +            if (predicate.test(op, s)) {
    +                try {
    +                    Thread.sleep(millis);
    +                } catch (InterruptedException e) {}
    +                return true;
    +            }
    +            return false;
    +        }));
    +    }
    +
         public void setAlwaysFail(KeeperException.Code rc) {
             this.alwaysFail.set(rc);
         }
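
    A hedged usage sketch of the new delay() hook: because programmedFailure() removes a matching entry once it fires, each registered delay applies to the first matching operation only. The helper name and path prefix below are hypothetical:

    import org.apache.zookeeper.MockZooKeeper;

    public class MockZkDelaySketch {
        // Slows down the first ZooKeeper operation that touches the given path prefix by the given amount.
        static void delayFirstMatchingOp(MockZooKeeper mockZooKeeper, String pathPrefix, long millis) {
            mockZooKeeper.delay(millis, (op, path) -> path != null && path.startsWith(pathPrefix));
        }
    }
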
    diff --git a/tests/bc_2_0_0/pom.xml b/tests/bc_2_0_0/pom.xml
    index 93adaffe83459..d8978a381e75c 100644
    --- a/tests/bc_2_0_0/pom.xml
    +++ b/tests/bc_2_0_0/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    bc_2_0_0
    -    jar
    -    Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.0-rc1-incubating
    -
    -    
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client
    -            2.0.0-rc1-incubating
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -    
    -
    -    
    +
    +  4.0.0
    +  
+    org.apache.pulsar.tests
    +    tests-parent
    +    3.0.0-SNAPSHOT
    +  
    +  bc_2_0_0
    +  jar
    +  Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.0-rc1-incubating
    +  
    +    
    +      org.apache.pulsar
    +      pulsar-client
    +      2.0.0-rc1-incubating
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      BackwardsCompatTests
    +      
    +        
    +          BackwardsCompatTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            BackwardsCompatTests
    -            
    -                
    -                    BackwardsCompatTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/pulsar.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    +              false
    +              
    +                src/test/resources/pulsar.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/bc_2_0_0/src/test/resources/pulsar.xml b/tests/bc_2_0_0/src/test/resources/pulsar.xml
    index 43dfaea15813c..2e9d57ed72832 100644
    --- a/tests/bc_2_0_0/src/test/resources/pulsar.xml
    +++ b/tests/bc_2_0_0/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/bc_2_0_1/pom.xml b/tests/bc_2_0_1/pom.xml
    index 41da961f3bb8e..449faeb422b51 100644
    --- a/tests/bc_2_0_1/pom.xml
    +++ b/tests/bc_2_0_1/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    bc_2_0_1
    -    jar
    -    Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.1-incubating
    -
    -    
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client
    -            2.0.1-incubating
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -    
    -
    -    
    +
    +  4.0.0
    +  
+    org.apache.pulsar.tests
    +    tests-parent
    +    3.0.0-SNAPSHOT
    +  
    +  bc_2_0_1
    +  jar
    +  Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.1-incubating
    +  
    +    
    +      org.apache.pulsar
    +      pulsar-client
    +      2.0.1-incubating
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      BackwardsCompatTests
    +      
    +        
    +          BackwardsCompatTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            BackwardsCompatTests
    -            
    -                
    -                    BackwardsCompatTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/pulsar.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    +              false
    +              
    +                src/test/resources/pulsar.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/bc_2_0_1/src/test/resources/pulsar.xml b/tests/bc_2_0_1/src/test/resources/pulsar.xml
    index 43dfaea15813c..2e9d57ed72832 100644
    --- a/tests/bc_2_0_1/src/test/resources/pulsar.xml
    +++ b/tests/bc_2_0_1/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/bc_2_6_0/pom.xml b/tests/bc_2_6_0/pom.xml
    index f1e75c41ae6d3..7cd7b8ef6d889 100644
    --- a/tests/bc_2_6_0/pom.xml
    +++ b/tests/bc_2_6_0/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -    4.0.0
    -
    -    bc_2_6_0
    -    jar
    -    Apache Pulsar :: Tests :: Backwards Client Compatibility 2.6.0
    -
    -    
    -
    -        
    -            com.google.code.gson
    -            gson
    -            test
    -        
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client
    -            2.6.0
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -    
    -
    -    
    +
    +  
+    org.apache.pulsar.tests
    +    tests-parent
    +    3.0.0-SNAPSHOT
    +  
    +  4.0.0
    +  bc_2_6_0
    +  jar
    +  Apache Pulsar :: Tests :: Backwards Client Compatibility 2.6.0
    +  
    +    
    +      com.google.code.gson
    +      gson
    +      test
    +    
    +    
    +      org.apache.pulsar
    +      pulsar-client
    +      2.6.0
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      BackwardsCompatTests
    +      
    +        
    +          BackwardsCompatTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            BackwardsCompatTests
    -            
    -                
    -                    BackwardsCompatTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/backwards-client.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    -
    -
    +              false
    +              
    +                src/test/resources/backwards-client.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/bc_2_6_0/src/test/resources/backwards-client.xml b/tests/bc_2_6_0/src/test/resources/backwards-client.xml
    index 43dfaea15813c..2e9d57ed72832 100644
    --- a/tests/bc_2_6_0/src/test/resources/backwards-client.xml
    +++ b/tests/bc_2_6_0/src/test/resources/backwards-client.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/certificate-authority/.gitignore b/tests/certificate-authority/.gitignore
    new file mode 100644
    index 0000000000000..de3be7546361f
    --- /dev/null
    +++ b/tests/certificate-authority/.gitignore
    @@ -0,0 +1,3 @@
    +# Files generated when running openssl
    +*.old
    +*.attr
    diff --git a/tests/certificate-authority/README.md b/tests/certificate-authority/README.md
    index 008120a35f4a1..02ebbdf9258d5 100644
    --- a/tests/certificate-authority/README.md
    +++ b/tests/certificate-authority/README.md
    @@ -3,23 +3,33 @@
     Generated based on instructions from https://jamielinux.com/docs/openssl-certificate-authority/introduction.html,
     though the intermediate CA has been omitted for simplicity.
     
    -The environment variable, CA_HOME, must be set to point to the directory
    -containing this file before running any openssl commands.
+The following commands must be run from the directory containing this README, because the openssl.cnf configuration resolves its paths relative to the current directory.
     
     The password for the CA private key is ```PulsarTesting```.
     
     ## Generating server keys
     
    -In this example, we're generating a key for the broker.
+In this example, we generate keys for both the broker and the proxy. If they ever need to be regenerated, a new CN
+must be used, because the index.txt database in this directory already records the existing CNs. Alternatively,
+index.txt could be removed to start over, but that was not necessary here.
     
    -The common name when generating the CSR should be the domain name of the broker.
+The common name (CN) in the CSR used to be the broker's domain name. The domain name is now carried in the Subject
+Alternative Name (SAN) instead, because relying on the CN for hostname verification is deprecated. The
+[openssl.cnf](openssl.cnf) file has been updated to reflect this change. Both the proxy and the broker have the
+SAN ```DNS:localhost, IP:127.0.0.1```; the broker additionally has ```DNS:unresolvable-broker-address``` for SNI testing.
     
     ```bash
     openssl genrsa -out server-keys/broker.key.pem 2048
    -openssl req -config openssl.cnf -key server-keys/broker.key.pem -new -sha256 -out server-keys/broker.csr.pem
    -openssl ca -config openssl.cnf -extensions server_cert \
    -    -days 100000 -notext -md sha256 -in server-keys/broker.csr.pem -out server-keys/broker.cert.pem
    +openssl req -config openssl.cnf -subj "/CN=broker-localhost-SAN" -key server-keys/broker.key.pem -new -sha256 -out server-keys/broker.csr.pem
    +openssl ca -config openssl.cnf -extensions broker_cert -days 100000 -md sha256 -in server-keys/broker.csr.pem \
    +    -out server-keys/broker.cert.pem -batch -key PulsarTesting
     openssl pkcs8 -topk8 -inform PEM -outform PEM -in server-keys/broker.key.pem -out server-keys/broker.key-pk8.pem -nocrypt
    +
    +openssl genrsa -out server-keys/proxy.key.pem 2048
    +openssl req -config openssl.cnf -subj "/CN=proxy-localhost-SAN" -key server-keys/proxy.key.pem -new -sha256 -out server-keys/proxy.csr.pem
    +openssl ca -config openssl.cnf -extensions proxy_cert -days 100000 -md sha256 -in server-keys/proxy.csr.pem \
    +    -out server-keys/proxy.cert.pem -batch -key PulsarTesting
    +openssl pkcs8 -topk8 -inform PEM -outform PEM -in server-keys/proxy.key.pem -out server-keys/proxy.key-pk8.pem -nocrypt
     ```
     
     You need to configure the server with broker.key-pk8.pem and broker.cert.pem.
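
Since hostname matching now relies on the SAN entries rather than the CN, it can be handy to confirm what a regenerated certificate actually carries. A minimal sketch (not part of this change; the relative path assumes it is run from tests/certificate-authority) that prints the SAN list of the broker certificate:

```java
// Sketch only: print the Subject Alternative Names of the regenerated broker certificate
// to confirm that DNS:localhost, DNS:unresolvable-broker-address and IP:127.0.0.1 are present.
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.List;

public class PrintBrokerSan {
    public static void main(String[] args) throws Exception {
        // The cert file now also contains openssl's textual dump (the -notext flag was dropped),
        // so cut the content down to the PEM block before parsing it.
        String pem = Files.readString(Path.of("server-keys/broker.cert.pem"));
        pem = pem.substring(pem.indexOf("-----BEGIN CERTIFICATE-----"));
        X509Certificate cert = (X509Certificate) CertificateFactory.getInstance("X.509")
                .generateCertificate(new ByteArrayInputStream(pem.getBytes(StandardCharsets.US_ASCII)));
        // Each entry is a [type, value] pair; type 2 = dNSName, type 7 = iPAddress (RFC 5280).
        for (List<?> san : cert.getSubjectAlternativeNames()) {
            System.out.println(san.get(0) + " -> " + san.get(1));
        }
    }
}
```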
    diff --git a/tests/certificate-authority/index.txt b/tests/certificate-authority/index.txt
    index 376f86725c21a..acb5eed051c13 100644
    --- a/tests/certificate-authority/index.txt
    +++ b/tests/certificate-authority/index.txt
    @@ -5,3 +5,5 @@ V	22920409135604Z		1003	unknown	/CN=proxy
     V	22920410132517Z		1004	unknown	/CN=superproxy
     V	22920411084025Z		1005	unknown	/CN=user1
     V	22960802101401Z		1006	unknown	/CN=proxy.pulsar.apache.org
    +V	22970222155018Z		1007	unknown	/CN=broker-localhost-SAN
    +V	22970222155019Z		1008	unknown	/CN=proxy-localhost-SAN
    diff --git a/tests/certificate-authority/newcerts/1007.pem b/tests/certificate-authority/newcerts/1007.pem
    new file mode 100644
    index 0000000000000..4237719f20ebd
    --- /dev/null
    +++ b/tests/certificate-authority/newcerts/1007.pem
    @@ -0,0 +1,111 @@
    +Certificate:
    +    Data:
    +        Version: 3 (0x2)
    +        Serial Number: 4103 (0x1007)
    +    Signature Algorithm: sha256WithRSAEncryption
    +        Issuer: CN=foobar
    +        Validity
    +            Not Before: May 10 15:50:18 2023 GMT
    +            Not After : Feb 22 15:50:18 2297 GMT
    +        Subject: CN=broker-localhost-SAN
    +        Subject Public Key Info:
    +            Public Key Algorithm: rsaEncryption
    +                Public-Key: (2048 bit)
    +                Modulus:
    +                    00:de:d1:da:bb:91:b3:16:c4:b2:e8:89:30:9e:c1:
    +                    5e:0b:cf:db:c4:c3:d9:b1:af:40:a5:0b:38:36:1b:
    +                    14:fe:0f:22:9c:e6:59:6a:15:5b:db:f6:f7:f3:a5:
    +                    02:29:94:7a:d2:0c:67:ad:aa:63:62:7e:fc:58:11:
    +                    29:48:b8:3c:91:b2:73:7e:12:6b:f2:ea:36:77:0f:
    +                    15:9b:46:95:ce:73:15:8d:c8:d9:97:57:03:90:33:
    +                    2d:7d:f3:ee:e5:01:6d:d8:c6:da:ab:07:b9:dd:1c:
    +                    e0:4b:ce:6a:de:a8:d2:e3:c1:52:6d:83:3a:0a:f0:
    +                    ed:cf:f7:56:6a:87:0e:73:e3:12:82:2b:65:ab:d8:
    +                    a9:44:5b:4a:2f:a5:92:94:32:f1:a1:e4:af:18:0f:
    +                    0f:18:60:cd:f7:d0:9d:03:9f:d7:e9:a8:60:54:bb:
    +                    3b:9a:05:db:fd:38:04:3c:b4:23:41:16:6c:7c:3b:
    +                    d9:b6:e0:2f:bd:cb:62:55:1b:e8:d0:8f:43:76:ef:
    +                    55:86:cf:25:c3:bc:ae:e3:46:50:89:f7:71:ad:06:
    +                    5e:28:e6:f6:f0:76:27:ea:7e:1b:67:53:39:26:20:
    +                    19:18:82:b1:11:5f:ea:91:c2:e3:d3:f6:5a:c7:fd:
    +                    61:a2:92:de:7d:7c:da:6d:e8:bf:39:52:10:31:60:
    +                    4b:e1
    +                Exponent: 65537 (0x10001)
    +        X509v3 extensions:
    +            X509v3 Basic Constraints: 
    +                CA:FALSE
    +            Netscape Cert Type: 
    +                SSL Server
    +            Netscape Comment: 
    +                OpenSSL Generated Server Certificate
    +            X509v3 Subject Key Identifier: 
    +                17:07:3B:AA:85:83:B5:04:83:EC:B2:6C:1E:3A:F0:F5:59:AA:61:28
    +            X509v3 Subject Alternative Name: 
    +                DNS:localhost, DNS:unresolvable-broker-address, IP Address:127.0.0.1
    +            X509v3 Authority Key Identifier: 
    +                keyid:57:0B:E9:CB:23:E8:BF:47:3E:50:7A:3F:45:7E:A1:18:43:9D:15:27
    +                DirName:/CN=foobar
    +                serial:D7:E2:87:4F:A0:79:E2:0C
    +
    +            X509v3 Key Usage: critical
    +                Digital Signature, Key Encipherment
    +            X509v3 Extended Key Usage: 
    +                TLS Web Server Authentication
    +    Signature Algorithm: sha256WithRSAEncryption
    +         e4:27:61:e2:0f:b6:a0:ca:9f:ce:e3:53:0b:44:ab:86:a1:e2:
    +         4d:88:e1:7d:2e:b0:aa:32:96:2b:3d:da:60:70:6a:c3:62:c5:
    +         76:f2:8f:0d:16:31:f2:ad:e5:2f:43:f3:cb:e4:fa:95:6c:20:
    +         81:33:1a:c7:5a:55:57:c9:ab:ca:66:45:30:58:00:db:e8:51:
    +         c9:2c:a9:72:c1:18:f5:01:87:9f:73:20:85:6c:e5:6c:3f:c9:
    +         67:b4:f0:20:e5:ed:e2:4a:08:0b:af:68:43:e5:a9:c7:e1:39:
    +         e8:b5:49:cb:47:4a:6d:e5:16:ae:88:92:13:85:8e:42:1e:0a:
    +         eb:59:ed:a7:c1:9b:bc:4b:7b:99:f8:1d:f0:d7:1d:90:c9:cf:
    +         86:6a:d3:10:d0:36:e4:f5:b9:33:79:c7:a2:68:31:f7:bb:8d:
    +         1e:d6:33:79:bd:e7:0e:4f:4d:e9:2e:15:04:4f:6b:4b:2e:93:
    +         28:72:d1:0e:aa:ee:e6:ef:68:be:58:2b:cc:56:01:27:16:f9:
    +         34:8e:66:86:27:0a:b0:fb:32:56:a9:8a:d9:6f:b1:86:bd:ba:
    +         fd:50:6c:d5:b2:54:e7:4e:c6:2d:19:88:a9:89:2c:ef:be:08:
    +         0d:2b:49:91:0b:09:42:64:06:a3:9d:d7:94:ed:e8:74:74:48:
    +         43:57:41:6f:e5:06:98:46:1d:c5:60:9c:69:f8:fb:fe:a6:01:
    +         4a:35:be:21:36:c2:a3:44:c8:c4:2c:21:09:f4:28:9a:ad:a0:
    +         97:1e:00:29:cc:0f:26:fa:59:21:25:c0:9e:fa:22:53:67:6d:
    +         ab:a6:56:08:fd:37:1d:69:fe:ef:6f:29:89:1a:66:7b:c7:ff:
    +         b1:34:f1:d6:be:21:81:e3:bc:4f:13:02:a7:4b:9d:13:05:46:
    +         40:88:4a:aa:db:fb:64:f8:6b:fb:5d:a0:b1:0c:1a:b8:4c:ab:
    +         6f:69:fe:0b:55:4e:b3:38:1f:91:0b:71:77:1e:11:39:54:9a:
    +         62:51:ea:6d:a8:5e:0d:4a:91:fb:d8:be:5d:93:e8:43:f3:4a:
    +         11:fb:31:cf:14:1a:1c:8d:31:1b:99:31:e0:2b:81:01:91:6f:
    +         da:ba:cb:1f:51:21:55:29:3f:4c:71:e3:d0:29:41:de:a0:00:
    +         da:07:ed:5e:c9:af:32:61:6d:55:f8:f5:2d:46:03:34:33:fb:
    +         2e:1e:aa:7c:fe:d2:30:4d:40:cc:ed:76:ec:f6:bd:ed:35:c8:
    +         d8:b3:46:56:aa:2c:53:84:56:45:b0:a3:f6:35:66:93:da:8c:
    +         17:39:c1:29:7c:99:c5:0b:73:c1:f9:16:d0:57:fc:57:59:06:
    +         af:39:9f:a9:51:35:0b:c7
    +-----BEGIN CERTIFICATE-----
    +MIIExzCCAq+gAwIBAgICEAcwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    +YmFyMCAXDTIzMDUxMDE1NTAxOFoYDzIyOTcwMjIyMTU1MDE4WjAfMR0wGwYDVQQD
    +DBRicm9rZXItbG9jYWxob3N0LVNBTjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
    +AQoCggEBAN7R2ruRsxbEsuiJMJ7BXgvP28TD2bGvQKULODYbFP4PIpzmWWoVW9v2
    +9/OlAimUetIMZ62qY2J+/FgRKUi4PJGyc34Sa/LqNncPFZtGlc5zFY3I2ZdXA5Az
    +LX3z7uUBbdjG2qsHud0c4EvOat6o0uPBUm2DOgrw7c/3VmqHDnPjEoIrZavYqURb
    +Si+lkpQy8aHkrxgPDxhgzffQnQOf1+moYFS7O5oF2/04BDy0I0EWbHw72bbgL73L
    +YlUb6NCPQ3bvVYbPJcO8ruNGUIn3ca0GXijm9vB2J+p+G2dTOSYgGRiCsRFf6pHC
    +49P2Wsf9YaKS3n182m3ovzlSEDFgS+ECAwEAAaOCARcwggETMAkGA1UdEwQCMAAw
    +EQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVy
    +YXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFBcHO6qFg7UEg+yybB46
    +8PVZqmEoMDcGA1UdEQQwMC6CCWxvY2FsaG9zdIIbdW5yZXNvbHZhYmxlLWJyb2tl
    +ci1hZGRyZXNzhwR/AAABMEEGA1UdIwQ6MDiAFFcL6csj6L9HPlB6P0V+oRhDnRUn
    +oRWkEzARMQ8wDQYDVQQDDAZmb29iYXKCCQDX4odPoHniDDAOBgNVHQ8BAf8EBAMC
    +BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggIBAOQnYeIP
    +tqDKn87jUwtEq4ah4k2I4X0usKoylis92mBwasNixXbyjw0WMfKt5S9D88vk+pVs
    +IIEzGsdaVVfJq8pmRTBYANvoUcksqXLBGPUBh59zIIVs5Ww/yWe08CDl7eJKCAuv
    +aEPlqcfhOei1SctHSm3lFq6IkhOFjkIeCutZ7afBm7xLe5n4HfDXHZDJz4Zq0xDQ
    +NuT1uTN5x6JoMfe7jR7WM3m95w5PTekuFQRPa0sukyhy0Q6q7ubvaL5YK8xWAScW
    ++TSOZoYnCrD7MlapitlvsYa9uv1QbNWyVOdOxi0ZiKmJLO++CA0rSZELCUJkBqOd
    +15Tt6HR0SENXQW/lBphGHcVgnGn4+/6mAUo1viE2wqNEyMQsIQn0KJqtoJceACnM
    +Dyb6WSElwJ76IlNnbaumVgj9Nx1p/u9vKYkaZnvH/7E08da+IYHjvE8TAqdLnRMF
    +RkCISqrb+2T4a/tdoLEMGrhMq29p/gtVTrM4H5ELcXceETlUmmJR6m2oXg1KkfvY
    +vl2T6EPzShH7Mc8UGhyNMRuZMeArgQGRb9q6yx9RIVUpP0xx49ApQd6gANoH7V7J
    +rzJhbVX49S1GAzQz+y4eqnz+0jBNQMztduz2ve01yNizRlaqLFOEVkWwo/Y1ZpPa
    +jBc5wSl8mcULc8H5FtBX/FdZBq85n6lRNQvH
    +-----END CERTIFICATE-----
    diff --git a/tests/certificate-authority/newcerts/1008.pem b/tests/certificate-authority/newcerts/1008.pem
    new file mode 100644
    index 0000000000000..85687bdfd30ba
    --- /dev/null
    +++ b/tests/certificate-authority/newcerts/1008.pem
    @@ -0,0 +1,110 @@
    +Certificate:
    +    Data:
    +        Version: 3 (0x2)
    +        Serial Number: 4104 (0x1008)
    +    Signature Algorithm: sha256WithRSAEncryption
    +        Issuer: CN=foobar
    +        Validity
    +            Not Before: May 10 15:50:19 2023 GMT
    +            Not After : Feb 22 15:50:19 2297 GMT
    +        Subject: CN=proxy-localhost-SAN
    +        Subject Public Key Info:
    +            Public Key Algorithm: rsaEncryption
    +                Public-Key: (2048 bit)
    +                Modulus:
    +                    00:cc:15:c9:85:06:43:47:bd:46:9f:4f:03:1a:e0:
    +                    6e:94:13:4e:b0:30:ea:88:ca:3a:e4:39:92:12:c1:
    +                    77:51:8c:0d:3c:b9:26:5c:2f:dc:fc:b1:5a:bf:0e:
    +                    47:ff:09:60:30:79:8e:55:26:fe:d0:a1:ed:9f:6d:
    +                    8a:6a:06:85:f0:d0:dc:94:a6:54:a1:a6:c9:3e:57:
    +                    d5:69:7d:e9:25:c1:ef:6b:77:e1:62:76:d8:e4:54:
    +                    91:40:bc:0b:11:74:b8:30:bb:d4:02:77:d6:bd:d2:
    +                    d0:e7:ad:df:7d:98:96:74:42:ad:53:b3:88:c8:dc:
    +                    1d:db:51:63:84:ee:7e:85:73:14:5e:d4:c8:f0:01:
    +                    5f:67:52:ed:94:87:f7:d6:aa:28:8b:2c:84:98:8c:
    +                    b9:91:b5:38:99:80:5d:b3:d4:db:95:96:09:ef:1d:
    +                    a1:6f:86:c8:17:86:f7:0a:1e:72:3b:50:8c:53:e5:
    +                    ce:d4:8c:cf:cc:81:3d:46:55:ff:65:25:0b:36:31:
    +                    31:a6:22:27:47:96:59:38:c1:cd:66:a6:9a:83:98:
    +                    dc:b8:2e:10:8d:ba:45:ae:aa:20:6e:e3:0b:bd:ec:
    +                    e6:63:b5:40:55:d4:fe:97:b1:f1:8d:9a:c0:a2:46:
    +                    8e:a3:ed:a0:1b:ed:40:b0:00:a5:28:f9:da:03:bd:
    +                    c1:a9
    +                Exponent: 65537 (0x10001)
    +        X509v3 extensions:
    +            X509v3 Basic Constraints: 
    +                CA:FALSE
    +            Netscape Cert Type: 
    +                SSL Server
    +            Netscape Comment: 
    +                OpenSSL Generated Server Certificate
    +            X509v3 Subject Key Identifier: 
    +                C5:33:73:67:03:B7:51:08:F4:BD:D3:CD:4F:DC:CF:83:11:53:AD:39
    +            X509v3 Subject Alternative Name: 
    +                DNS:localhost, IP Address:127.0.0.1
    +            X509v3 Authority Key Identifier: 
    +                keyid:57:0B:E9:CB:23:E8:BF:47:3E:50:7A:3F:45:7E:A1:18:43:9D:15:27
    +                DirName:/CN=foobar
    +                serial:D7:E2:87:4F:A0:79:E2:0C
    +
    +            X509v3 Key Usage: critical
    +                Digital Signature, Key Encipherment
    +            X509v3 Extended Key Usage: 
    +                TLS Web Server Authentication
    +    Signature Algorithm: sha256WithRSAEncryption
    +         43:ef:67:29:9a:0c:53:97:7c:fc:72:73:6c:8d:48:78:4e:ec:
    +         e3:14:9d:d9:1e:83:4c:d6:f0:56:e9:c4:d8:de:f5:54:fb:a5:
    +         3b:ff:59:23:75:26:74:f0:86:90:d0:4d:41:25:03:87:e0:60:
    +         a4:9b:33:3d:bd:1c:79:b8:db:86:1c:38:09:26:0d:80:3e:f9:
    +         1e:28:11:0d:3d:6b:1e:1a:7a:9a:fa:fc:18:22:7f:fd:46:55:
    +         c2:2f:56:5c:5c:8a:45:f2:74:7a:e4:6c:d0:e0:ea:ec:74:b7:
    +         0d:a8:f3:ca:18:cf:a4:be:a0:e0:4a:32:ca:15:7e:5d:06:56:
    +         b7:71:7c:e0:dc:19:fa:be:3e:94:84:20:be:96:34:61:0b:f0:
    +         d1:d6:31:49:0b:b0:20:b8:f9:5c:49:08:13:9b:45:c0:6f:58:
    +         16:81:0b:0c:f8:66:38:58:83:d4:b0:bc:14:35:8d:e2:1d:d5:
    +         2d:ea:02:ae:42:e1:88:22:5a:b0:cf:e5:31:b1:cb:d3:e9:d2:
    +         5e:88:55:bd:62:ac:85:aa:4e:fc:18:6b:65:f9:9e:fc:93:27:
    +         0c:c6:29:aa:f0:64:6e:72:dc:d9:95:ae:38:ae:64:9e:c6:44:
    +         8a:0b:0f:0e:d4:69:7e:79:e0:46:d0:75:96:2a:1a:60:af:30:
    +         23:dc:d2:67:0d:08:2a:9d:58:29:09:1e:c8:08:d5:3a:88:2d:
    +         1a:dc:47:dc:5d:bd:0d:5c:54:f1:5d:5a:6d:0d:de:bc:18:67:
    +         2d:dd:1b:fe:8b:0e:03:19:b0:0f:f2:59:69:d0:7a:4f:a1:33:
    +         74:f7:22:ef:ff:90:e1:4b:8e:ac:13:00:6f:00:9b:55:83:d2:
    +         96:db:a8:81:c9:a9:8d:c6:a6:21:3d:14:d3:43:71:28:c6:ea:
    +         6d:2d:91:b9:58:bf:ec:18:75:c4:8c:10:43:88:60:08:c0:bb:
    +         9d:fb:90:80:1e:d5:a3:ea:e7:8a:16:f7:f4:d7:cb:35:93:03:
    +         55:e4:cc:58:31:1e:df:6e:e4:1b:6e:ad:3a:76:56:e5:8b:4e:
    +         d9:71:af:11:92:a7:7a:e2:66:cc:d2:73:f3:ec:e8:3b:67:f0:
    +         6a:31:10:82:e8:c4:1e:ae:c3:54:a7:e2:42:86:fe:43:75:ad:
    +         ef:83:d7:1c:2f:91:94:1c:57:9d:1c:43:94:b1:47:b2:6c:96:
    +         fd:83:69:0f:6c:e2:18:9b:65:8e:71:08:01:b3:73:46:aa:3c:
    +         2e:07:14:cd:03:ae:dc:5a:51:da:c5:41:53:cc:f5:fc:c8:db:
    +         4e:76:27:99:9a:ec:40:68:07:d6:10:e1:f9:68:6b:5d:52:95:
    +         3d:01:f4:a7:40:11:61:0a
    +-----BEGIN CERTIFICATE-----
    +MIIEpzCCAo+gAwIBAgICEAgwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    +YmFyMCAXDTIzMDUxMDE1NTAxOVoYDzIyOTcwMjIyMTU1MDE5WjAeMRwwGgYDVQQD
    +DBNwcm94eS1sb2NhbGhvc3QtU0FOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
    +CgKCAQEAzBXJhQZDR71Gn08DGuBulBNOsDDqiMo65DmSEsF3UYwNPLkmXC/c/LFa
    +vw5H/wlgMHmOVSb+0KHtn22KagaF8NDclKZUoabJPlfVaX3pJcHva3fhYnbY5FSR
    +QLwLEXS4MLvUAnfWvdLQ563ffZiWdEKtU7OIyNwd21FjhO5+hXMUXtTI8AFfZ1Lt
    +lIf31qooiyyEmIy5kbU4mYBds9TblZYJ7x2hb4bIF4b3Ch5yO1CMU+XO1IzPzIE9
    +RlX/ZSULNjExpiInR5ZZOMHNZqaag5jcuC4QjbpFrqogbuMLvezmY7VAVdT+l7Hx
    +jZrAokaOo+2gG+1AsAClKPnaA73BqQIDAQABo4H5MIH2MAkGA1UdEwQCMAAwEQYJ
    +YIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRl
    +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFMUzc2cDt1EI9L3TzU/cz4MR
    +U605MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATBBBgNVHSMEOjA4gBRXC+nL
    +I+i/Rz5Qej9FfqEYQ50VJ6EVpBMwETEPMA0GA1UEAwwGZm9vYmFyggkA1+KHT6B5
    +4gwwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3
    +DQEBCwUAA4ICAQBD72cpmgxTl3z8cnNsjUh4TuzjFJ3ZHoNM1vBW6cTY3vVU+6U7
    +/1kjdSZ08IaQ0E1BJQOH4GCkmzM9vRx5uNuGHDgJJg2APvkeKBENPWseGnqa+vwY
    +In/9RlXCL1ZcXIpF8nR65GzQ4OrsdLcNqPPKGM+kvqDgSjLKFX5dBla3cXzg3Bn6
    +vj6UhCC+ljRhC/DR1jFJC7AguPlcSQgTm0XAb1gWgQsM+GY4WIPUsLwUNY3iHdUt
    +6gKuQuGIIlqwz+UxscvT6dJeiFW9YqyFqk78GGtl+Z78kycMximq8GRuctzZla44
    +rmSexkSKCw8O1Gl+eeBG0HWWKhpgrzAj3NJnDQgqnVgpCR7ICNU6iC0a3EfcXb0N
    +XFTxXVptDd68GGct3Rv+iw4DGbAP8llp0HpPoTN09yLv/5DhS46sEwBvAJtVg9KW
    +26iByamNxqYhPRTTQ3EoxuptLZG5WL/sGHXEjBBDiGAIwLud+5CAHtWj6ueKFvf0
    +18s1kwNV5MxYMR7fbuQbbq06dlbli07Zca8Rkqd64mbM0nPz7Og7Z/BqMRCC6MQe
    +rsNUp+JChv5Dda3vg9ccL5GUHFedHEOUsUeybJb9g2kPbOIYm2WOcQgBs3NGqjwu
    +BxTNA67cWlHaxUFTzPX8yNtOdieZmuxAaAfWEOH5aGtdUpU9AfSnQBFhCg==
    +-----END CERTIFICATE-----
    diff --git a/tests/certificate-authority/openssl.cnf b/tests/certificate-authority/openssl.cnf
    index 9c8585edc9a1f..f7a23b3b33f6d 100644
    --- a/tests/certificate-authority/openssl.cnf
    +++ b/tests/certificate-authority/openssl.cnf
    @@ -27,7 +27,7 @@ default_ca = CA_default
     
     [ CA_default ]
     # Directory and file locations.
    -dir               = $ENV::CA_HOME
    +dir               = .
     certs             = $dir/certs
     crl_dir           = $dir/crl
     new_certs_dir     = $dir/newcerts
    @@ -92,12 +92,25 @@ authorityKeyIdentifier = keyid,issuer
     keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
     extendedKeyUsage = clientAuth, emailProtection
     
    -[ server_cert ]
    +[ broker_cert ]
     # Extensions for server certificates (`man x509v3_config`).
     basicConstraints = CA:FALSE
     nsCertType = server
     nsComment = "OpenSSL Generated Server Certificate"
     subjectKeyIdentifier = hash
    +# The unresolvable address is used for SNI testing
    +subjectAltName = DNS:localhost, DNS:unresolvable-broker-address, IP:127.0.0.1
    +authorityKeyIdentifier = keyid,issuer:always
    +keyUsage = critical, digitalSignature, keyEncipherment
    +extendedKeyUsage = serverAuth
    +
    +[ proxy_cert ]
    +# Extensions for server certificates (`man x509v3_config`).
    +basicConstraints = CA:FALSE
    +nsCertType = server
    +nsComment = "OpenSSL Generated Server Certificate"
    +subjectKeyIdentifier = hash
    +subjectAltName = DNS:localhost, IP:127.0.0.1
     authorityKeyIdentifier = keyid,issuer:always
     keyUsage = critical, digitalSignature, keyEncipherment
     extendedKeyUsage = serverAuth
    diff --git a/tests/certificate-authority/serial b/tests/certificate-authority/serial
    index fb35a14c02716..6cb3869343bf8 100644
    --- a/tests/certificate-authority/serial
    +++ b/tests/certificate-authority/serial
    @@ -1 +1 @@
    -1007
    +1009
    diff --git a/tests/certificate-authority/server-keys/broker.cert.pem b/tests/certificate-authority/server-keys/broker.cert.pem
    index b5c7a5dc709a1..4237719f20ebd 100644
    --- a/tests/certificate-authority/server-keys/broker.cert.pem
    +++ b/tests/certificate-authority/server-keys/broker.cert.pem
    @@ -1,27 +1,111 @@
    +Certificate:
    +    Data:
    +        Version: 3 (0x2)
    +        Serial Number: 4103 (0x1007)
    +    Signature Algorithm: sha256WithRSAEncryption
    +        Issuer: CN=foobar
    +        Validity
    +            Not Before: May 10 15:50:18 2023 GMT
    +            Not After : Feb 22 15:50:18 2297 GMT
    +        Subject: CN=broker-localhost-SAN
    +        Subject Public Key Info:
    +            Public Key Algorithm: rsaEncryption
    +                Public-Key: (2048 bit)
    +                Modulus:
    +                    00:de:d1:da:bb:91:b3:16:c4:b2:e8:89:30:9e:c1:
    +                    5e:0b:cf:db:c4:c3:d9:b1:af:40:a5:0b:38:36:1b:
    +                    14:fe:0f:22:9c:e6:59:6a:15:5b:db:f6:f7:f3:a5:
    +                    02:29:94:7a:d2:0c:67:ad:aa:63:62:7e:fc:58:11:
    +                    29:48:b8:3c:91:b2:73:7e:12:6b:f2:ea:36:77:0f:
    +                    15:9b:46:95:ce:73:15:8d:c8:d9:97:57:03:90:33:
    +                    2d:7d:f3:ee:e5:01:6d:d8:c6:da:ab:07:b9:dd:1c:
    +                    e0:4b:ce:6a:de:a8:d2:e3:c1:52:6d:83:3a:0a:f0:
    +                    ed:cf:f7:56:6a:87:0e:73:e3:12:82:2b:65:ab:d8:
    +                    a9:44:5b:4a:2f:a5:92:94:32:f1:a1:e4:af:18:0f:
    +                    0f:18:60:cd:f7:d0:9d:03:9f:d7:e9:a8:60:54:bb:
    +                    3b:9a:05:db:fd:38:04:3c:b4:23:41:16:6c:7c:3b:
    +                    d9:b6:e0:2f:bd:cb:62:55:1b:e8:d0:8f:43:76:ef:
    +                    55:86:cf:25:c3:bc:ae:e3:46:50:89:f7:71:ad:06:
    +                    5e:28:e6:f6:f0:76:27:ea:7e:1b:67:53:39:26:20:
    +                    19:18:82:b1:11:5f:ea:91:c2:e3:d3:f6:5a:c7:fd:
    +                    61:a2:92:de:7d:7c:da:6d:e8:bf:39:52:10:31:60:
    +                    4b:e1
    +                Exponent: 65537 (0x10001)
    +        X509v3 extensions:
    +            X509v3 Basic Constraints: 
    +                CA:FALSE
    +            Netscape Cert Type: 
    +                SSL Server
    +            Netscape Comment: 
    +                OpenSSL Generated Server Certificate
    +            X509v3 Subject Key Identifier: 
    +                17:07:3B:AA:85:83:B5:04:83:EC:B2:6C:1E:3A:F0:F5:59:AA:61:28
    +            X509v3 Subject Alternative Name: 
    +                DNS:localhost, DNS:unresolvable-broker-address, IP Address:127.0.0.1
    +            X509v3 Authority Key Identifier: 
    +                keyid:57:0B:E9:CB:23:E8:BF:47:3E:50:7A:3F:45:7E:A1:18:43:9D:15:27
    +                DirName:/CN=foobar
    +                serial:D7:E2:87:4F:A0:79:E2:0C
    +
    +            X509v3 Key Usage: critical
    +                Digital Signature, Key Encipherment
    +            X509v3 Extended Key Usage: 
    +                TLS Web Server Authentication
    +    Signature Algorithm: sha256WithRSAEncryption
    +         e4:27:61:e2:0f:b6:a0:ca:9f:ce:e3:53:0b:44:ab:86:a1:e2:
    +         4d:88:e1:7d:2e:b0:aa:32:96:2b:3d:da:60:70:6a:c3:62:c5:
    +         76:f2:8f:0d:16:31:f2:ad:e5:2f:43:f3:cb:e4:fa:95:6c:20:
    +         81:33:1a:c7:5a:55:57:c9:ab:ca:66:45:30:58:00:db:e8:51:
    +         c9:2c:a9:72:c1:18:f5:01:87:9f:73:20:85:6c:e5:6c:3f:c9:
    +         67:b4:f0:20:e5:ed:e2:4a:08:0b:af:68:43:e5:a9:c7:e1:39:
    +         e8:b5:49:cb:47:4a:6d:e5:16:ae:88:92:13:85:8e:42:1e:0a:
    +         eb:59:ed:a7:c1:9b:bc:4b:7b:99:f8:1d:f0:d7:1d:90:c9:cf:
    +         86:6a:d3:10:d0:36:e4:f5:b9:33:79:c7:a2:68:31:f7:bb:8d:
    +         1e:d6:33:79:bd:e7:0e:4f:4d:e9:2e:15:04:4f:6b:4b:2e:93:
    +         28:72:d1:0e:aa:ee:e6:ef:68:be:58:2b:cc:56:01:27:16:f9:
    +         34:8e:66:86:27:0a:b0:fb:32:56:a9:8a:d9:6f:b1:86:bd:ba:
    +         fd:50:6c:d5:b2:54:e7:4e:c6:2d:19:88:a9:89:2c:ef:be:08:
    +         0d:2b:49:91:0b:09:42:64:06:a3:9d:d7:94:ed:e8:74:74:48:
    +         43:57:41:6f:e5:06:98:46:1d:c5:60:9c:69:f8:fb:fe:a6:01:
    +         4a:35:be:21:36:c2:a3:44:c8:c4:2c:21:09:f4:28:9a:ad:a0:
    +         97:1e:00:29:cc:0f:26:fa:59:21:25:c0:9e:fa:22:53:67:6d:
    +         ab:a6:56:08:fd:37:1d:69:fe:ef:6f:29:89:1a:66:7b:c7:ff:
    +         b1:34:f1:d6:be:21:81:e3:bc:4f:13:02:a7:4b:9d:13:05:46:
    +         40:88:4a:aa:db:fb:64:f8:6b:fb:5d:a0:b1:0c:1a:b8:4c:ab:
    +         6f:69:fe:0b:55:4e:b3:38:1f:91:0b:71:77:1e:11:39:54:9a:
    +         62:51:ea:6d:a8:5e:0d:4a:91:fb:d8:be:5d:93:e8:43:f3:4a:
    +         11:fb:31:cf:14:1a:1c:8d:31:1b:99:31:e0:2b:81:01:91:6f:
    +         da:ba:cb:1f:51:21:55:29:3f:4c:71:e3:d0:29:41:de:a0:00:
    +         da:07:ed:5e:c9:af:32:61:6d:55:f8:f5:2d:46:03:34:33:fb:
    +         2e:1e:aa:7c:fe:d2:30:4d:40:cc:ed:76:ec:f6:bd:ed:35:c8:
    +         d8:b3:46:56:aa:2c:53:84:56:45:b0:a3:f6:35:66:93:da:8c:
    +         17:39:c1:29:7c:99:c5:0b:73:c1:f9:16:d0:57:fc:57:59:06:
    +         af:39:9f:a9:51:35:0b:c7
     -----BEGIN CERTIFICATE-----
    -MIIEkDCCAnigAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTE4MDYyMjA4NTUzMloYDzIyOTIwNDA2MDg1NTMyWjAjMSEwHwYDVQQD
    -DBhicm9rZXIucHVsc2FyLmFwYWNoZS5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IB
    -DwAwggEKAoIBAQDQouKhZah4hMCqmg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63
    -EvNjEK4L/ZWSEV45L/wc6YV14RmM6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0O
    -OQXVicqZLOc6igZQhRg8ANDYdTJUTF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01
    -+ARO9OotMJtBY+vIU5bV6JydfgkhQH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+X
    -aqTN3/tF8+MBcFB3G04s1qc2CJPJM3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00s
    -bxa4FGbKgfDobbkJ+GgblWCkAcLN95sKTqtHAgMBAAGjgd0wgdowCQYDVR0TBAIw
    -ADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2Vu
    -ZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUaxFvJrkEGqk8azTA
    -DyVyTyTbJAIwQQYDVR0jBDowOIAUVwvpyyPov0c+UHo/RX6hGEOdFSehFaQTMBEx
    -DzANBgNVBAMMBmZvb2JhcoIJANfih0+geeIMMA4GA1UdDwEB/wQEAwIFoDATBgNV
    -HSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAgEA35QDGclHzQtHs3yQ
    -ZzNOSKisg5srTiIoQgRzfHrXfkthNFCnBzhKjBxqk3EIasVtvyGuk0ThneC1ai3y
    -ZK3BivnMZfm1SfyvieFoqWetsxohWfcpOSVkpvO37P6v/NmmaTIGkBN3gxKCx0QN
    -zqApLQyNTM++X3wxetYH/afAGUrRmBGWZuJheQpB9yZ+FB6BRp8YuYIYBzANJyW9
    -spvXW03TpqX2AIoRBoGMLzK72vbhAbLWiCIfEYREhbZVRkP+yvD338cWrILlOEur
    -x/n8L/FTmbf7mXzHg4xaQ3zg/5+0OCPMDPUBE4xWDBAbZ82hgOcTqfVjwoPgo2V0
    -fbbx6redq44J3Vn5d9Xhi59fkpqEjHpX4xebr5iMikZsNTJMeLh0h3uf7DstuO9d
    -mfnF5j+yDXCKb9XzCsTSvGCN+spmUh6RfSrbkw8/LrRvBUpKVEM0GfKSnaFpOaSS
    -efM4UEi72FRjszzHEkdvpiLhYvihINLJmDXszhc3fCi42be/DGmUhuhTZWynOPmp
    -0N0V/8/sGT5gh4fGEtGzS/8xEvZwO9uDlccJiG8Pi+aO0/K9urB9nppd/xKWXv3C
    -cib/QrW0Qow4TADWC1fnGYCpFzzaZ2esPL2MvzOYXnW4/AbEqmb6Weatluai64ZK
    -3N2cGJWRyvpvvmbP2hKCa4eLgEc=
    +MIIExzCCAq+gAwIBAgICEAcwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    +YmFyMCAXDTIzMDUxMDE1NTAxOFoYDzIyOTcwMjIyMTU1MDE4WjAfMR0wGwYDVQQD
    +DBRicm9rZXItbG9jYWxob3N0LVNBTjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
    +AQoCggEBAN7R2ruRsxbEsuiJMJ7BXgvP28TD2bGvQKULODYbFP4PIpzmWWoVW9v2
    +9/OlAimUetIMZ62qY2J+/FgRKUi4PJGyc34Sa/LqNncPFZtGlc5zFY3I2ZdXA5Az
    +LX3z7uUBbdjG2qsHud0c4EvOat6o0uPBUm2DOgrw7c/3VmqHDnPjEoIrZavYqURb
    +Si+lkpQy8aHkrxgPDxhgzffQnQOf1+moYFS7O5oF2/04BDy0I0EWbHw72bbgL73L
    +YlUb6NCPQ3bvVYbPJcO8ruNGUIn3ca0GXijm9vB2J+p+G2dTOSYgGRiCsRFf6pHC
    +49P2Wsf9YaKS3n182m3ovzlSEDFgS+ECAwEAAaOCARcwggETMAkGA1UdEwQCMAAw
    +EQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVy
    +YXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFBcHO6qFg7UEg+yybB46
    +8PVZqmEoMDcGA1UdEQQwMC6CCWxvY2FsaG9zdIIbdW5yZXNvbHZhYmxlLWJyb2tl
    +ci1hZGRyZXNzhwR/AAABMEEGA1UdIwQ6MDiAFFcL6csj6L9HPlB6P0V+oRhDnRUn
    +oRWkEzARMQ8wDQYDVQQDDAZmb29iYXKCCQDX4odPoHniDDAOBgNVHQ8BAf8EBAMC
    +BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggIBAOQnYeIP
    +tqDKn87jUwtEq4ah4k2I4X0usKoylis92mBwasNixXbyjw0WMfKt5S9D88vk+pVs
    +IIEzGsdaVVfJq8pmRTBYANvoUcksqXLBGPUBh59zIIVs5Ww/yWe08CDl7eJKCAuv
    +aEPlqcfhOei1SctHSm3lFq6IkhOFjkIeCutZ7afBm7xLe5n4HfDXHZDJz4Zq0xDQ
    +NuT1uTN5x6JoMfe7jR7WM3m95w5PTekuFQRPa0sukyhy0Q6q7ubvaL5YK8xWAScW
    ++TSOZoYnCrD7MlapitlvsYa9uv1QbNWyVOdOxi0ZiKmJLO++CA0rSZELCUJkBqOd
    +15Tt6HR0SENXQW/lBphGHcVgnGn4+/6mAUo1viE2wqNEyMQsIQn0KJqtoJceACnM
    +Dyb6WSElwJ76IlNnbaumVgj9Nx1p/u9vKYkaZnvH/7E08da+IYHjvE8TAqdLnRMF
    +RkCISqrb+2T4a/tdoLEMGrhMq29p/gtVTrM4H5ELcXceETlUmmJR6m2oXg1KkfvY
    +vl2T6EPzShH7Mc8UGhyNMRuZMeArgQGRb9q6yx9RIVUpP0xx49ApQd6gANoH7V7J
    +rzJhbVX49S1GAzQz+y4eqnz+0jBNQMztduz2ve01yNizRlaqLFOEVkWwo/Y1ZpPa
    +jBc5wSl8mcULc8H5FtBX/FdZBq85n6lRNQvH
     -----END CERTIFICATE-----
    diff --git a/tests/certificate-authority/server-keys/broker.csr.pem b/tests/certificate-authority/server-keys/broker.csr.pem
    index d2342595eb254..9d28c52be7909 100644
    --- a/tests/certificate-authority/server-keys/broker.csr.pem
    +++ b/tests/certificate-authority/server-keys/broker.csr.pem
    @@ -1,15 +1,15 @@
     -----BEGIN CERTIFICATE REQUEST-----
    -MIICaDCCAVACAQAwIzEhMB8GA1UEAwwYYnJva2VyLnB1bHNhci5hcGFjaGUub3Jn
    -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0KLioWWoeITAqpoOGkuU
    -YUBv2NYAPsj/QxhfZpcN+bX4X1rOtxLzYxCuC/2VkhFeOS/8HOmFdeEZjOmydFf9
    -L16OMZiEm6qbTQ/9pESLKZC2UORdDjkF1YnKmSznOooGUIUYPADQ2HUyVExeuQ6m
    -rl+Dibd2DIRdhUre40LZY9OiEAWtNfgETvTqLTCbQWPryFOW1eicnX4JIUB/ayAy
    -OwDHmOY9NoBpCSa3pX5vlqDrFHs/l2qkzd/7RfPjAXBQdxtOLNanNgiTyTN2Bsb8
    -RLR6kxiNeLfSfKeTu1/SR6XGPF9NLG8WuBRmyoHw6G25CfhoG5VgpAHCzfebCk6r
    -RwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAHVVGKnfqBDmu+e5MWK9i0ja/JFv
    -dhST705gdKDOPc7MXDVr+zJZKgvnDtzDrWTe7Zk0p7xQf3kc773eYCdlznX+J1Fw
    -EfIHXQTBZRZxmHnYqc012i5tshvEOS0o61ZEgxz8hxGLwGlRaIcy+qt927fscpQ5
    -7VEnlxzD4YeHwryIXH5hOr/J1OmlL58Fxwh2NJfso7ErRuHW44XK4qdwWCQs/nVN
    -EQyV6RCbaiRq9Ks4j3FwtqmfgzMB1+T3L+CiuhPol2/rZwD3o5j7SP8ZGxC15Tzi
    -wHG71H0wp1CY+tkAcvm2zmoHR9z1SD84raZLYJVRgUio7myW/DVBqPxCSvU=
    +MIICZDCCAUwCAQAwHzEdMBsGA1UEAwwUYnJva2VyLWxvY2FsaG9zdC1TQU4wggEi
    +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDe0dq7kbMWxLLoiTCewV4Lz9vE
    +w9mxr0ClCzg2GxT+DyKc5llqFVvb9vfzpQIplHrSDGetqmNifvxYESlIuDyRsnN+
    +Emvy6jZ3DxWbRpXOcxWNyNmXVwOQMy198+7lAW3YxtqrB7ndHOBLzmreqNLjwVJt
    +gzoK8O3P91Zqhw5z4xKCK2Wr2KlEW0ovpZKUMvGh5K8YDw8YYM330J0Dn9fpqGBU
    +uzuaBdv9OAQ8tCNBFmx8O9m24C+9y2JVG+jQj0N271WGzyXDvK7jRlCJ93GtBl4o
    +5vbwdifqfhtnUzkmIBkYgrERX+qRwuPT9lrH/WGikt59fNpt6L85UhAxYEvhAgMB
    +AAGgADANBgkqhkiG9w0BAQsFAAOCAQEAcLkzWe6zgkVpk4OUlCv1HUqmntiBHdmh
    +24v3OKhQjyMs+m/srBe6r+lBdZLnbSH5fq7eToEwRMt/vNirsXCaxGGVOUnThStt
    +Z/rR45bpJl1TomXwEG9xHq7yOaHVfxkNZgaHCu6BZ1ZjYgfqWffFf9hcqAJ3xZm0
    +XMPfJs9i/TRHCsdUge1BHZrxD/fzriWM7k89XktY+0zSqGfdhOXE0FO30bnC4mJG
    +vZXY5reyIuRlFBpnDUtuYBaSfbUYguaSUYIoHOUsQrmMSqPLJUyY1zNm1t5f1jIx
    +ZaK8NEIq2AHHqEx7I/7+lVRRWa9IjdqWIT9KD5TraOML9oS74sto2w==
     -----END CERTIFICATE REQUEST-----
    diff --git a/tests/certificate-authority/server-keys/broker.key-pk8.pem b/tests/certificate-authority/server-keys/broker.key-pk8.pem
    index 2b51d015b8ace..dd9fa523e8ede 100644
    --- a/tests/certificate-authority/server-keys/broker.key-pk8.pem
    +++ b/tests/certificate-authority/server-keys/broker.key-pk8.pem
    @@ -1,28 +1,28 @@
     -----BEGIN PRIVATE KEY-----
    -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDQouKhZah4hMCq
    -mg4aS5RhQG/Y1gA+yP9DGF9mlw35tfhfWs63EvNjEK4L/ZWSEV45L/wc6YV14RmM
    -6bJ0V/0vXo4xmISbqptND/2kRIspkLZQ5F0OOQXVicqZLOc6igZQhRg8ANDYdTJU
    -TF65DqauX4OJt3YMhF2FSt7jQtlj06IQBa01+ARO9OotMJtBY+vIU5bV6Jydfgkh
    -QH9rIDI7AMeY5j02gGkJJrelfm+WoOsUez+XaqTN3/tF8+MBcFB3G04s1qc2CJPJ
    -M3YGxvxEtHqTGI14t9J8p5O7X9JHpcY8X00sbxa4FGbKgfDobbkJ+GgblWCkAcLN
    -95sKTqtHAgMBAAECggEBALE1eMtfnk3nbAI74bih84D7C0Ug14p8jJv/qqBnsx4j
    -WrgbWDMVrJa7Rym2FQHBMMfgIwKnso0iSeJvaPz683j1lk833YKe0VQOPgD1m0IN
    -wV1J6mQ3OOZcKDIcerY1IBHqSmBEzR7dxIbnaxlCAX9gb0hdBK6zCwA5TMG5OQ5Y
    -3cGOmevK5i2PiejhpruA8h7E48P1ATaGHUZif9YD724oi6AcilQ8H/DlOjZTvlmK
    -r4aJ30f72NwGM8Ecet5CE2wyflAGtY0k+nChYkPRfy54u64Z/T9B53AvneFaj8jv
    -yFepZgRTs2cWhEl0KQGuBHQ4+IeOfMt2LebhvjWW8YkCgYEA7BXVsnqPHKRDd8wP
    -eNkolY4Fjdq4wu9ad+DaFiZcJuv7ugr+Kplltq6e4aU36zEdBYdPp/6KM/HGE/Xj
    -bo0CELNUKs/Ny9H/UJc8DDbVEmoF3XGiIbKKq1T8NTXTETFnwrGkBFD8nl7YTsOF
    -M4FZmSok0MhhkpEULAqxBS6YpQsCgYEA4jxM1egTVSWjTreg2UdYo2507jKa7maP
    -PRtoPsNJzWNbOpfj26l3/8pd6oYKWck6se6RxIUxUrk3ywhNJIIOvWEC7TaOH1c9
    -T4NQNcweqBW9+A1x5gyzT14gDaBfl45gs82vI+kcpVv/w2N3HZOQZX3yAUqWpfw2
    -yw1uQDXtgDUCgYEAiYPWbBXTkp1j5z3nrT7g0uxc89n5USLWkYlZvxktCEbg4+dP
    -UUT06EoipdD1F3wOKZA9p98uZT9pX2sUxOpBz7SFTEKq3xQ9IZZWFc9CoW08aVat
    -V++FsnLYTa5CeXtLsy6CGTmLTDx2xrpAtlWb+QmBVFPD8fmrxFOd9STFKS0CgYAt
    -6ztVN3OlFqyc75yQPXD6SxMkvdTAisSMDKIOCylRrNb5f5baIP2gR3zkeyxiqPtm
    -3htsHfSy67EtXpP50wQW4Dft2eLi7ZweJXMEWFfomfEjBeeWYAGNHHe5DFIauuVZ
    -2WexDEGqNpAlIm0s7aSjVPrn1DHbouOkNyenlMqN+QKBgQDVYVhk9widShSnCmUA
    -G30moXDgj3eRqCf5T7NEr9GXD1QBD/rQSPh5agnDV7IYLpV7/wkYLI7l9x7mDwu+
    -I9mRXkyAmTVEctLTdXQHt0jdJa5SfUaVEDUzQbr0fUjkmythTvqZ809+d3ELPeLI
    -5qJ7jxgksHWji4lYfL4r4J6Zaw==
    +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDe0dq7kbMWxLLo
    +iTCewV4Lz9vEw9mxr0ClCzg2GxT+DyKc5llqFVvb9vfzpQIplHrSDGetqmNifvxY
    +ESlIuDyRsnN+Emvy6jZ3DxWbRpXOcxWNyNmXVwOQMy198+7lAW3YxtqrB7ndHOBL
    +zmreqNLjwVJtgzoK8O3P91Zqhw5z4xKCK2Wr2KlEW0ovpZKUMvGh5K8YDw8YYM33
    +0J0Dn9fpqGBUuzuaBdv9OAQ8tCNBFmx8O9m24C+9y2JVG+jQj0N271WGzyXDvK7j
    +RlCJ93GtBl4o5vbwdifqfhtnUzkmIBkYgrERX+qRwuPT9lrH/WGikt59fNpt6L85
    +UhAxYEvhAgMBAAECggEAbQ4xDFTHXoFvPzjGPy1NJmLZoXhp9/lanmzbWj/vClnG
    +Cx0C7lT93K8HtIwyfr9ZTa0coXcfpXmZcFEV762cl4LL3AyQIRhZB/SuEo19jMnu
    +5rJDLTs9Vzp1LYxShGsqpErPg54IbhxP+0pQLCJc9XQNL+RmaCx7eKoJ9aGchULY
    +BdbCRctYhHaq/AC2qNYZF41Ys9zdNN0/2NnRqfgaaAj9hUzu3LlaJX1TWHbgikLo
    +QdmRNmTMMxfLl2kyoweDEC6MdSKCbcdDyM46Va3yY3KTuAdjP6x1ALzAarBDj6zb
    +a0Vp7g80OcKjmLYt8rFImjwb7D9EanOy2GkK2CwhAQKBgQD8v4gOTeNYC4Xo6Rti
    +psv+QCuH8hiLdee4KFdzqthlELDfhncDKXfwcZI3PME/aBWvvAn+rokl/UfCm5nQ
    +fwXW3MYyNpmk0HJjWAQcexsdHCM9I0CgEp8uInn+8EFqlYVh2ltoQLhGuIwOqqPk
    +3cQV8ImW+CmqmWYzbSZw97OqqQKBgQDhr7+oOf+sly0MVf5WixHTURJAU6p2VyTt
    +aNsNiuLN/W3Vax9Ql9HEm3RPx2SxFEIxllPcwb1Vms/ONmvT1t3xWGp7TIOX/0/m
    +uhNIG2/Bcr47NgWjhiV4zE/TfawkcP7/MujUxl2/zm7RHLrU2bmi8rV+PGqlVE0w
    +v7iKj4bSeQKBgHkI34a6Fdzb58yZlNuxNI8U+8OmU8q1M7ok13w0nFwJminwop2J
    +Bj7GpFZ/aauLlJcLXV3xBwyCNhMjoI0PxyQVpXP2Ya1jhOO+Cnn5GgrepqFoeFIv
    +mLrnF7TWKP15jN5HSu6pz5VOWwPLA6Fd8cDv53O8c3eW7jJCWt5OQGPBAoGALwbI
    +EO3E8NmvcVqZ3L6twDKscur8IhyWfUHUI0ZFbFbahBYGOGzqMOWTnuwVdzCZemuw
    +nddg9G2Fz5pXbZTgOmIKDhcrdIimxZUQX34YE18tdHkVQ7W4KSuplpAhRpalC9g3
    +295ZupXxUXGDHMchf2rDlsJQFpMyYm4Qrg6qMUECgYBhBG/iLvE6/Z1oh6eky6WU
    +Dx7nL+FDDu3JbNAWs6RMDs9fE+7iDEj8MxZL0PUnVMsQvf43Ew1boyIGwlNtdRZ5
    +dLaKFAgJ8YbqIptnFfGQ/8vKhK+x4FWzEd3kuRzQZPVRVV5Op6bRp1Kb8NMPCQXb
    +72qifYUaI7uirULNSit9wg==
     -----END PRIVATE KEY-----
    diff --git a/tests/certificate-authority/server-keys/broker.key.pem b/tests/certificate-authority/server-keys/broker.key.pem
    index dc22667ab47a5..5c20238c7b9c9 100644
    --- a/tests/certificate-authority/server-keys/broker.key.pem
    +++ b/tests/certificate-authority/server-keys/broker.key.pem
    @@ -1,27 +1,27 @@
     -----BEGIN RSA PRIVATE KEY-----
    -MIIEpQIBAAKCAQEA0KLioWWoeITAqpoOGkuUYUBv2NYAPsj/QxhfZpcN+bX4X1rO
    -txLzYxCuC/2VkhFeOS/8HOmFdeEZjOmydFf9L16OMZiEm6qbTQ/9pESLKZC2UORd
    -DjkF1YnKmSznOooGUIUYPADQ2HUyVExeuQ6mrl+Dibd2DIRdhUre40LZY9OiEAWt
    -NfgETvTqLTCbQWPryFOW1eicnX4JIUB/ayAyOwDHmOY9NoBpCSa3pX5vlqDrFHs/
    -l2qkzd/7RfPjAXBQdxtOLNanNgiTyTN2Bsb8RLR6kxiNeLfSfKeTu1/SR6XGPF9N
    -LG8WuBRmyoHw6G25CfhoG5VgpAHCzfebCk6rRwIDAQABAoIBAQCxNXjLX55N52wC
    -O+G4ofOA+wtFINeKfIyb/6qgZ7MeI1q4G1gzFayWu0cpthUBwTDH4CMCp7KNIkni
    -b2j8+vN49ZZPN92CntFUDj4A9ZtCDcFdSepkNzjmXCgyHHq2NSAR6kpgRM0e3cSG
    -52sZQgF/YG9IXQSuswsAOUzBuTkOWN3BjpnryuYtj4no4aa7gPIexOPD9QE2hh1G
    -Yn/WA+9uKIugHIpUPB/w5To2U75Ziq+Gid9H+9jcBjPBHHreQhNsMn5QBrWNJPpw
    -oWJD0X8ueLuuGf0/QedwL53hWo/I78hXqWYEU7NnFoRJdCkBrgR0OPiHjnzLdi3m
    -4b41lvGJAoGBAOwV1bJ6jxykQ3fMD3jZKJWOBY3auMLvWnfg2hYmXCbr+7oK/iqZ
    -ZbaunuGlN+sxHQWHT6f+ijPxxhP1426NAhCzVCrPzcvR/1CXPAw21RJqBd1xoiGy
    -iqtU/DU10xExZ8KxpARQ/J5e2E7DhTOBWZkqJNDIYZKRFCwKsQUumKULAoGBAOI8
    -TNXoE1Ulo063oNlHWKNudO4ymu5mjz0baD7DSc1jWzqX49upd//KXeqGClnJOrHu
    -kcSFMVK5N8sITSSCDr1hAu02jh9XPU+DUDXMHqgVvfgNceYMs09eIA2gX5eOYLPN
    -ryPpHKVb/8Njdx2TkGV98gFKlqX8NssNbkA17YA1AoGBAImD1mwV05KdY+c9560+
    -4NLsXPPZ+VEi1pGJWb8ZLQhG4OPnT1FE9OhKIqXQ9Rd8DimQPaffLmU/aV9rFMTq
    -Qc+0hUxCqt8UPSGWVhXPQqFtPGlWrVfvhbJy2E2uQnl7S7Mughk5i0w8dsa6QLZV
    -m/kJgVRTw/H5q8RTnfUkxSktAoGALes7VTdzpRasnO+ckD1w+ksTJL3UwIrEjAyi
    -DgspUazW+X+W2iD9oEd85HssYqj7Zt4bbB30suuxLV6T+dMEFuA37dni4u2cHiVz
    -BFhX6JnxIwXnlmABjRx3uQxSGrrlWdlnsQxBqjaQJSJtLO2ko1T659Qx26LjpDcn
    -p5TKjfkCgYEA1WFYZPcInUoUpwplABt9JqFw4I93kagn+U+zRK/Rlw9UAQ/60Ej4
    -eWoJw1eyGC6Ve/8JGCyO5fce5g8LviPZkV5MgJk1RHLS03V0B7dI3SWuUn1GlRA1
    -M0G69H1I5JsrYU76mfNPfndxCz3iyOaie48YJLB1o4uJWHy+K+CemWs=
    +MIIEogIBAAKCAQEA3tHau5GzFsSy6IkwnsFeC8/bxMPZsa9ApQs4NhsU/g8inOZZ
    +ahVb2/b386UCKZR60gxnrapjYn78WBEpSLg8kbJzfhJr8uo2dw8Vm0aVznMVjcjZ
    +l1cDkDMtffPu5QFt2Mbaqwe53RzgS85q3qjS48FSbYM6CvDtz/dWaocOc+MSgitl
    +q9ipRFtKL6WSlDLxoeSvGA8PGGDN99CdA5/X6ahgVLs7mgXb/TgEPLQjQRZsfDvZ
    +tuAvvctiVRvo0I9Ddu9Vhs8lw7yu40ZQifdxrQZeKOb28HYn6n4bZ1M5JiAZGIKx
    +EV/qkcLj0/Zax/1hopLefXzabei/OVIQMWBL4QIDAQABAoIBAG0OMQxUx16Bbz84
    +xj8tTSZi2aF4aff5Wp5s21o/7wpZxgsdAu5U/dyvB7SMMn6/WU2tHKF3H6V5mXBR
    +Fe+tnJeCy9wMkCEYWQf0rhKNfYzJ7uayQy07PVc6dS2MUoRrKqRKz4OeCG4cT/tK
    +UCwiXPV0DS/kZmgse3iqCfWhnIVC2AXWwkXLWIR2qvwAtqjWGReNWLPc3TTdP9jZ
    +0an4GmgI/YVM7ty5WiV9U1h24IpC6EHZkTZkzDMXy5dpMqMHgxAujHUigm3HQ8jO
    +OlWt8mNyk7gHYz+sdQC8wGqwQ4+s22tFae4PNDnCo5i2LfKxSJo8G+w/RGpzsthp
    +CtgsIQECgYEA/L+IDk3jWAuF6OkbYqbL/kArh/IYi3XnuChXc6rYZRCw34Z3Ayl3
    +8HGSNzzBP2gVr7wJ/q6JJf1HwpuZ0H8F1tzGMjaZpNByY1gEHHsbHRwjPSNAoBKf
    +LiJ5/vBBapWFYdpbaEC4RriMDqqj5N3EFfCJlvgpqplmM20mcPezqqkCgYEA4a+/
    +qDn/rJctDFX+VosR01ESQFOqdlck7WjbDYrizf1t1WsfUJfRxJt0T8dksRRCMZZT
    +3MG9VZrPzjZr09bd8Vhqe0yDl/9P5roTSBtvwXK+OzYFo4YleMxP032sJHD+/zLo
    +1MZdv85u0Ry61Nm5ovK1fjxqpVRNML+4io+G0nkCgYB5CN+GuhXc2+fMmZTbsTSP
    +FPvDplPKtTO6JNd8NJxcCZop8KKdiQY+xqRWf2mri5SXC11d8QcMgjYTI6CND8ck
    +FaVz9mGtY4Tjvgp5+RoK3qahaHhSL5i65xe01ij9eYzeR0ruqc+VTlsDywOhXfHA
    +7+dzvHN3lu4yQlreTkBjwQKBgC8GyBDtxPDZr3Famdy+rcAyrHLq/CIcln1B1CNG
    +RWxW2oQWBjhs6jDlk57sFXcwmXprsJ3XYPRthc+aV22U4DpiCg4XK3SIpsWVEF9+
    +GBNfLXR5FUO1uCkrqZaQIUaWpQvYN9veWbqV8VFxgxzHIX9qw5bCUBaTMmJuEK4O
    +qjFBAoGAYQRv4i7xOv2daIenpMullA8e5y/hQw7tyWzQFrOkTA7PXxPu4gxI/DMW
    +S9D1J1TLEL3+NxMNW6MiBsJTbXUWeXS2ihQICfGG6iKbZxXxkP/LyoSvseBVsxHd
    +5Lkc0GT1UVVeTqem0adSm/DTDwkF2+9qon2FGiO7oq1CzUorfcI=
     -----END RSA PRIVATE KEY-----
    diff --git a/tests/certificate-authority/server-keys/proxy.cert.pem b/tests/certificate-authority/server-keys/proxy.cert.pem
    index 02caee5826346..85687bdfd30ba 100644
    --- a/tests/certificate-authority/server-keys/proxy.cert.pem
    +++ b/tests/certificate-authority/server-keys/proxy.cert.pem
    @@ -1,27 +1,110 @@
    +Certificate:
    +    Data:
    +        Version: 3 (0x2)
    +        Serial Number: 4104 (0x1008)
    +    Signature Algorithm: sha256WithRSAEncryption
    +        Issuer: CN=foobar
    +        Validity
    +            Not Before: May 10 15:50:19 2023 GMT
    +            Not After : Feb 22 15:50:19 2297 GMT
    +        Subject: CN=proxy-localhost-SAN
    +        Subject Public Key Info:
    +            Public Key Algorithm: rsaEncryption
    +                Public-Key: (2048 bit)
    +                Modulus:
    +                    00:cc:15:c9:85:06:43:47:bd:46:9f:4f:03:1a:e0:
    +                    6e:94:13:4e:b0:30:ea:88:ca:3a:e4:39:92:12:c1:
    +                    77:51:8c:0d:3c:b9:26:5c:2f:dc:fc:b1:5a:bf:0e:
    +                    47:ff:09:60:30:79:8e:55:26:fe:d0:a1:ed:9f:6d:
    +                    8a:6a:06:85:f0:d0:dc:94:a6:54:a1:a6:c9:3e:57:
    +                    d5:69:7d:e9:25:c1:ef:6b:77:e1:62:76:d8:e4:54:
    +                    91:40:bc:0b:11:74:b8:30:bb:d4:02:77:d6:bd:d2:
    +                    d0:e7:ad:df:7d:98:96:74:42:ad:53:b3:88:c8:dc:
    +                    1d:db:51:63:84:ee:7e:85:73:14:5e:d4:c8:f0:01:
    +                    5f:67:52:ed:94:87:f7:d6:aa:28:8b:2c:84:98:8c:
    +                    b9:91:b5:38:99:80:5d:b3:d4:db:95:96:09:ef:1d:
    +                    a1:6f:86:c8:17:86:f7:0a:1e:72:3b:50:8c:53:e5:
    +                    ce:d4:8c:cf:cc:81:3d:46:55:ff:65:25:0b:36:31:
    +                    31:a6:22:27:47:96:59:38:c1:cd:66:a6:9a:83:98:
    +                    dc:b8:2e:10:8d:ba:45:ae:aa:20:6e:e3:0b:bd:ec:
    +                    e6:63:b5:40:55:d4:fe:97:b1:f1:8d:9a:c0:a2:46:
    +                    8e:a3:ed:a0:1b:ed:40:b0:00:a5:28:f9:da:03:bd:
    +                    c1:a9
    +                Exponent: 65537 (0x10001)
    +        X509v3 extensions:
    +            X509v3 Basic Constraints: 
    +                CA:FALSE
    +            Netscape Cert Type: 
    +                SSL Server
    +            Netscape Comment: 
    +                OpenSSL Generated Server Certificate
    +            X509v3 Subject Key Identifier: 
    +                C5:33:73:67:03:B7:51:08:F4:BD:D3:CD:4F:DC:CF:83:11:53:AD:39
    +            X509v3 Subject Alternative Name: 
    +                DNS:localhost, IP Address:127.0.0.1
    +            X509v3 Authority Key Identifier: 
    +                keyid:57:0B:E9:CB:23:E8:BF:47:3E:50:7A:3F:45:7E:A1:18:43:9D:15:27
    +                DirName:/CN=foobar
    +                serial:D7:E2:87:4F:A0:79:E2:0C
    +
    +            X509v3 Key Usage: critical
    +                Digital Signature, Key Encipherment
    +            X509v3 Extended Key Usage: 
    +                TLS Web Server Authentication
    +    Signature Algorithm: sha256WithRSAEncryption
    +         43:ef:67:29:9a:0c:53:97:7c:fc:72:73:6c:8d:48:78:4e:ec:
    +         e3:14:9d:d9:1e:83:4c:d6:f0:56:e9:c4:d8:de:f5:54:fb:a5:
    +         3b:ff:59:23:75:26:74:f0:86:90:d0:4d:41:25:03:87:e0:60:
    +         a4:9b:33:3d:bd:1c:79:b8:db:86:1c:38:09:26:0d:80:3e:f9:
    +         1e:28:11:0d:3d:6b:1e:1a:7a:9a:fa:fc:18:22:7f:fd:46:55:
    +         c2:2f:56:5c:5c:8a:45:f2:74:7a:e4:6c:d0:e0:ea:ec:74:b7:
    +         0d:a8:f3:ca:18:cf:a4:be:a0:e0:4a:32:ca:15:7e:5d:06:56:
    +         b7:71:7c:e0:dc:19:fa:be:3e:94:84:20:be:96:34:61:0b:f0:
    +         d1:d6:31:49:0b:b0:20:b8:f9:5c:49:08:13:9b:45:c0:6f:58:
    +         16:81:0b:0c:f8:66:38:58:83:d4:b0:bc:14:35:8d:e2:1d:d5:
    +         2d:ea:02:ae:42:e1:88:22:5a:b0:cf:e5:31:b1:cb:d3:e9:d2:
    +         5e:88:55:bd:62:ac:85:aa:4e:fc:18:6b:65:f9:9e:fc:93:27:
    +         0c:c6:29:aa:f0:64:6e:72:dc:d9:95:ae:38:ae:64:9e:c6:44:
    +         8a:0b:0f:0e:d4:69:7e:79:e0:46:d0:75:96:2a:1a:60:af:30:
    +         23:dc:d2:67:0d:08:2a:9d:58:29:09:1e:c8:08:d5:3a:88:2d:
    +         1a:dc:47:dc:5d:bd:0d:5c:54:f1:5d:5a:6d:0d:de:bc:18:67:
    +         2d:dd:1b:fe:8b:0e:03:19:b0:0f:f2:59:69:d0:7a:4f:a1:33:
    +         74:f7:22:ef:ff:90:e1:4b:8e:ac:13:00:6f:00:9b:55:83:d2:
    +         96:db:a8:81:c9:a9:8d:c6:a6:21:3d:14:d3:43:71:28:c6:ea:
    +         6d:2d:91:b9:58:bf:ec:18:75:c4:8c:10:43:88:60:08:c0:bb:
    +         9d:fb:90:80:1e:d5:a3:ea:e7:8a:16:f7:f4:d7:cb:35:93:03:
    +         55:e4:cc:58:31:1e:df:6e:e4:1b:6e:ad:3a:76:56:e5:8b:4e:
    +         d9:71:af:11:92:a7:7a:e2:66:cc:d2:73:f3:ec:e8:3b:67:f0:
    +         6a:31:10:82:e8:c4:1e:ae:c3:54:a7:e2:42:86:fe:43:75:ad:
    +         ef:83:d7:1c:2f:91:94:1c:57:9d:1c:43:94:b1:47:b2:6c:96:
    +         fd:83:69:0f:6c:e2:18:9b:65:8e:71:08:01:b3:73:46:aa:3c:
    +         2e:07:14:cd:03:ae:dc:5a:51:da:c5:41:53:cc:f5:fc:c8:db:
    +         4e:76:27:99:9a:ec:40:68:07:d6:10:e1:f9:68:6b:5d:52:95:
    +         3d:01:f4:a7:40:11:61:0a
     -----BEGIN CERTIFICATE-----
    -MIIEjzCCAnegAwIBAgICEAYwDQYJKoZIhvcNAQENBQAwETEPMA0GA1UEAwwGZm9v
    -YmFyMCAXDTIyMTAxODEwMTQwMVoYDzIyOTYwODAyMTAxNDAxWjAiMSAwHgYDVQQD
    -DBdwcm94eS5wdWxzYXIuYXBhY2hlLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEP
    -ADCCAQoCggEBAPPnBnkHqKvXuv7BKOoQ8nAa7gEVAjzRANhOx2Yk3/JpN1/Ash48
    -UltPjHtop1kXLrnjM3DahQuolz1A/N5sN2RGoe+/Y/aI/FRDF25yGzEoM/kwZDjm
    -ejQj2Hb6YsupI+YYtPr5ZDSeIBvvlVurXfXJkZf5CXYeEjqr1pEpLpNCZoWoOiiC
    -73/0KBoOToR5+akw+Db2Qr5FSz7AuTQ9KUZ1HZNl4xZBuEha6avESdRykH2XQzDs
    -qMBVruByHbzO1pg/op4iOhqQ6DFu67veKjWzMLxKR7x/A8UOd9f9D3+pabBoU72b
    -NqgwbKCnERoo3Y0ge1B1x7GORR7GHrWSKlUCAwEAAaOB3TCB2jAJBgNVHRMEAjAA
    -MBEGCWCGSAGG+EIBAQQEAwIGQDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5l
    -cmF0ZWQgU2VydmVyIENlcnRpZmljYXRlMB0GA1UdDgQWBBQqVR7lwaEgKHsI8+D8
    -nNxPmgWZ7TBBBgNVHSMEOjA4gBRXC+nLI+i/Rz5Qej9FfqEYQ50VJ6EVpBMwETEP
    -MA0GA1UEAwwGZm9vYmFyggkA1+KHT6B54gwwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
    -JQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBDQUAA4ICAQBtoQTZ5u6NpDIKHo6V
    -yZqkRrMcg9J61zRm0tbf4D/iIsfWNiJrAWSudK4OgkUrXj4LFWKvzzcZtPltuUr5
    -yODXZgz8lnyLbw6GyrKFU4Gpbr8Be30Y1yF7dfTV0yp5ZoIXNILfKhU3not1yL41
    -0owaO7N0PyDAzQ7erPbbB9UG7xhYM5qFfAnevwX1rde12JHJULfeE9Ushuv+DcK5
    -JmNvkRE+nB/dljsST9pW+zjBDuhwTiDZMPtUPyM0tPn6+x5zwF0pWFKhCkO8lVhr
    -TxCG/bMF3j/0MxjQvDvcijJFHaZqLHsw/FqgEM5SNgAsTuuY7wBohSNRddfvahV1
    -xPdXUrALuDH/NmIzaYZW6hh6mOhl+R7lP2XXZbFTpTGVdoosdBTGkjbPGKMrT/L8
    -hwLvFezXaHZzqj4hLnmqFbhu+dDH55EE1HT5RP7kxGCq1AMuwlsjOVxURS0FZi87
    -Oaq19NKsyWfdf8igONsk0GBt5HeG+93fJkW/SxssTJdz1xc91KgGDlP3nAW3xBAz
    -TRvgiKIeMzOh+SWkTyz/cJugyxD+wXaAEL7VYsgOwilV+rbWKTDPvnNORqrLO/md
    -MHZqYWkFlld2kw8i4LYc6zXOsOWlOv0ZM7VcEs7ufBADQEiZPkDNvWlzM97oDabE
    -n/htdqxnoZ3NHJ1HJnz03jKSfg==
    +MIIEpzCCAo+gAwIBAgICEAgwDQYJKoZIhvcNAQELBQAwETEPMA0GA1UEAwwGZm9v
    +YmFyMCAXDTIzMDUxMDE1NTAxOVoYDzIyOTcwMjIyMTU1MDE5WjAeMRwwGgYDVQQD
    +DBNwcm94eS1sb2NhbGhvc3QtU0FOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
    +CgKCAQEAzBXJhQZDR71Gn08DGuBulBNOsDDqiMo65DmSEsF3UYwNPLkmXC/c/LFa
    +vw5H/wlgMHmOVSb+0KHtn22KagaF8NDclKZUoabJPlfVaX3pJcHva3fhYnbY5FSR
    +QLwLEXS4MLvUAnfWvdLQ563ffZiWdEKtU7OIyNwd21FjhO5+hXMUXtTI8AFfZ1Lt
    +lIf31qooiyyEmIy5kbU4mYBds9TblZYJ7x2hb4bIF4b3Ch5yO1CMU+XO1IzPzIE9
    +RlX/ZSULNjExpiInR5ZZOMHNZqaag5jcuC4QjbpFrqogbuMLvezmY7VAVdT+l7Hx
    +jZrAokaOo+2gG+1AsAClKPnaA73BqQIDAQABo4H5MIH2MAkGA1UdEwQCMAAwEQYJ
    +YIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRl
    +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFMUzc2cDt1EI9L3TzU/cz4MR
    +U605MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATBBBgNVHSMEOjA4gBRXC+nL
    +I+i/Rz5Qej9FfqEYQ50VJ6EVpBMwETEPMA0GA1UEAwwGZm9vYmFyggkA1+KHT6B5
    +4gwwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3
    +DQEBCwUAA4ICAQBD72cpmgxTl3z8cnNsjUh4TuzjFJ3ZHoNM1vBW6cTY3vVU+6U7
    +/1kjdSZ08IaQ0E1BJQOH4GCkmzM9vRx5uNuGHDgJJg2APvkeKBENPWseGnqa+vwY
    +In/9RlXCL1ZcXIpF8nR65GzQ4OrsdLcNqPPKGM+kvqDgSjLKFX5dBla3cXzg3Bn6
    +vj6UhCC+ljRhC/DR1jFJC7AguPlcSQgTm0XAb1gWgQsM+GY4WIPUsLwUNY3iHdUt
    +6gKuQuGIIlqwz+UxscvT6dJeiFW9YqyFqk78GGtl+Z78kycMximq8GRuctzZla44
    +rmSexkSKCw8O1Gl+eeBG0HWWKhpgrzAj3NJnDQgqnVgpCR7ICNU6iC0a3EfcXb0N
    +XFTxXVptDd68GGct3Rv+iw4DGbAP8llp0HpPoTN09yLv/5DhS46sEwBvAJtVg9KW
    +26iByamNxqYhPRTTQ3EoxuptLZG5WL/sGHXEjBBDiGAIwLud+5CAHtWj6ueKFvf0
    +18s1kwNV5MxYMR7fbuQbbq06dlbli07Zca8Rkqd64mbM0nPz7Og7Z/BqMRCC6MQe
    +rsNUp+JChv5Dda3vg9ccL5GUHFedHEOUsUeybJb9g2kPbOIYm2WOcQgBs3NGqjwu
    +BxTNA67cWlHaxUFTzPX8yNtOdieZmuxAaAfWEOH5aGtdUpU9AfSnQBFhCg==
     -----END CERTIFICATE-----
    diff --git a/tests/certificate-authority/server-keys/proxy.csr.pem b/tests/certificate-authority/server-keys/proxy.csr.pem
    index 8dbf74bb819ad..6cebd3548a14b 100644
    --- a/tests/certificate-authority/server-keys/proxy.csr.pem
    +++ b/tests/certificate-authority/server-keys/proxy.csr.pem
    @@ -1,15 +1,15 @@
     -----BEGIN CERTIFICATE REQUEST-----
    -MIICZzCCAU8CAQAwIjEgMB4GA1UEAwwXcHJveHkucHVsc2FyLmFwYWNoZS5vcmcw
    -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDz5wZ5B6ir17r+wSjqEPJw
    -Gu4BFQI80QDYTsdmJN/yaTdfwLIePFJbT4x7aKdZFy654zNw2oULqJc9QPzebDdk
    -RqHvv2P2iPxUQxduchsxKDP5MGQ45no0I9h2+mLLqSPmGLT6+WQ0niAb75Vbq131
    -yZGX+Ql2HhI6q9aRKS6TQmaFqDoogu9/9CgaDk6EefmpMPg29kK+RUs+wLk0PSlG
    -dR2TZeMWQbhIWumrxEnUcpB9l0Mw7KjAVa7gch28ztaYP6KeIjoakOgxbuu73io1
    -szC8Ske8fwPFDnfX/Q9/qWmwaFO9mzaoMGygpxEaKN2NIHtQdcexjkUexh61kipV
    -AgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAMBYwlvpcPsZQQMwUbts7GsX35Hcn
    -FAl8iWcKr9uw/9sSrZkstI9Aa8As+KYPeY3Z2p5TYY1TXokZa936NB00CWnY+gxY
    -lfKXy31yPqEHSwir1pQDU+WTILwZfbptFpAFEBy0SCDWrBZJUbM1ngqcVDg9jlQi
    -iZMDYbsnZ828Hn4e97P83bOubSBWIf1Rp6LcbIzJtwGCGVp+XPJYPMFXmpzAtwrT
    -tSgzCnHXseYKwIbjr+ReW58jE8Z59UqBm3/VeidLg94VfITuN5et42yypWd9Z7DU
    -C/qE8gjrqlvl49Xi6ye/RxKTMN+8TiQigU5ngEnYvNKbpKhU4veXHKjfrg==
    +MIICYzCCAUsCAQAwHjEcMBoGA1UEAwwTcHJveHktbG9jYWxob3N0LVNBTjCCASIw
    +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwVyYUGQ0e9Rp9PAxrgbpQTTrAw
    +6ojKOuQ5khLBd1GMDTy5Jlwv3PyxWr8OR/8JYDB5jlUm/tCh7Z9timoGhfDQ3JSm
    +VKGmyT5X1Wl96SXB72t34WJ22ORUkUC8CxF0uDC71AJ31r3S0Oet332YlnRCrVOz
    +iMjcHdtRY4TufoVzFF7UyPABX2dS7ZSH99aqKIsshJiMuZG1OJmAXbPU25WWCe8d
    +oW+GyBeG9woecjtQjFPlztSMz8yBPUZV/2UlCzYxMaYiJ0eWWTjBzWammoOY3Lgu
    +EI26Ra6qIG7jC73s5mO1QFXU/pex8Y2awKJGjqPtoBvtQLAApSj52gO9wakCAwEA
    +AaAAMA0GCSqGSIb3DQEBCwUAA4IBAQCqq+WUWY5w8RHsMi/zeR0JjXhbgdYSlsfP
    +J0fUTB88Jr/litM2u96/HFPC4K8MDlei7cKccyrRsLd+iEbG3ydNRB66WBCjq93o
    +/5LSMYGCejMDAdeE8qWBDf53Xlg+hGhMlFbbmL4JGTKX0OjAKnF5xT5i7n9rh/dM
    +FwoIU6ac+5btszD62IRGhyOmOHXSqL+KYArKhXKzGICFKvrOwfGrSC7Rt9zwV+lL
    +UB6NRWR5viSgIkwYw/W4R9M1iQPQLjGm/ibjW/FXWzr98LNgs8kjqMoAIDs7z8YU
    +FCT6YDb8zJlqiOBKnU7ReetKsHwPM2mAyWMS6z8R3LOSNdgrP70Y
     -----END CERTIFICATE REQUEST-----
    diff --git a/tests/certificate-authority/server-keys/proxy.key-pk8.pem b/tests/certificate-authority/server-keys/proxy.key-pk8.pem
    index 114fe2fb04d09..0dc72cde403f8 100644
    --- a/tests/certificate-authority/server-keys/proxy.key-pk8.pem
    +++ b/tests/certificate-authority/server-keys/proxy.key-pk8.pem
    @@ -1,28 +1,28 @@
     -----BEGIN PRIVATE KEY-----
    -MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDz5wZ5B6ir17r+
    -wSjqEPJwGu4BFQI80QDYTsdmJN/yaTdfwLIePFJbT4x7aKdZFy654zNw2oULqJc9
    -QPzebDdkRqHvv2P2iPxUQxduchsxKDP5MGQ45no0I9h2+mLLqSPmGLT6+WQ0niAb
    -75Vbq131yZGX+Ql2HhI6q9aRKS6TQmaFqDoogu9/9CgaDk6EefmpMPg29kK+RUs+
    -wLk0PSlGdR2TZeMWQbhIWumrxEnUcpB9l0Mw7KjAVa7gch28ztaYP6KeIjoakOgx
    -buu73io1szC8Ske8fwPFDnfX/Q9/qWmwaFO9mzaoMGygpxEaKN2NIHtQdcexjkUe
    -xh61kipVAgMBAAECggEBAJ/DuDC1fJ477OiNPLC+MyCN81NQIKwXt/b4+5KEGxHe
    -LACT59j4aHYZkIsSDXTFQ71N/1cwPLBbWd4s4LcNqecMgWzbMK7AIpFLdWDKa9dy
    -X0EemrfO+UOIK3YcI3UGsVY63un7TNFOtve1o19tzFmBFNa4saLmpcg64Y0qrbCV
    -KcHslT1T07szp5s+weiMxgsD17foNSBEXLxP7+1F9NPlWuiHh+Rl2/t+K2tjrXeI
    -EN9dtv29q4v9jCRU4yhIunAjLEvrMYCSGhXEGa+MRkgXkTPhhVN5nWX6M0uDyKgK
    -aJJBv+/H6QVj4XetubYdLjII0L2q/vckoD5JsaYfz40CgYEA/7ID5OWbp/OOCjK1
    -wbMByKwLUL5tHapZIoYdNg/w6zjjYl1TM9e18p1llOb+oPTEk+p8LigkkkDvPrEZ
    -zAhAU3Z3nRWGkVOLNYycuSed283Up0Kml08vsRNGDa78bma4GaWnJpOuPx5fB1HN
    -njjq9XhYzIEAHO4dT2dAQB003JMCgYEA9DFp0FnfsZsuAMLwBJJ08yHn4CjoYpMq
    -TAg3JScEjnm1ELJBvqLYRHzqHVeSKUHTtVDwaAqMe43qEnQ3IuFS+dhJGfOX41Cf
    -Yw7WDZvIeuPZER7WXUY27wmjGbjx6SdIuDYnYYA0P3RSGm3VcGZqaLoW7MvfDB0y
    -pYpVSV6pFncCgYEAz5/dSaCoJFjAncdPj1mruSb6iTYXpF8OwdnlHmETX+1xtg3R
    -4ebm93qXYbGwUUJv3SwqadBu4dOYcW+dYu/QS/WGaydvfdI41+K14CMrK7CXXLni
    -TDsgnsjnuXS9xWfjVfANKmYAt4AR6f+i1zegknKGqIiXbuZrJm7Q3T7aDcECgYEA
    -7tXBm6G7kzemt+Hx5VblgcOgyfLYz0kG7pR+cx0FbOCHAsyGVxFpGxtd09MJxsZ2
    -bXm7mNbwbgvwa5o1Ly1Y/brYTMSewxrguX8SRv8eB2wAq6kQmuwI4KT5XDgyiwr8
    -Kgf1XnyJHaMEhor0XlodK08PCw2fm3aXSafSIM+v66MCgYEAiGDfCy25tcI4UpAb
    -v8WjI2Y7EXE2vJQ/mqMhKzmfME8HMzBvuzhwAERJgPHh1lNOIwH1LnF4lZS7jr75
    -A78lgfTj6ZKNHpr5s5+5zdllvFwQ51SczCUnZv0flb/S5Qeciqh0a//pe9FQvL3+
    -3cqpvX158ljL8FYfcPQOBuIUdjw=
    +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDMFcmFBkNHvUaf
    +TwMa4G6UE06wMOqIyjrkOZISwXdRjA08uSZcL9z8sVq/Dkf/CWAweY5VJv7Qoe2f
    +bYpqBoXw0NyUplShpsk+V9Vpfeklwe9rd+FidtjkVJFAvAsRdLgwu9QCd9a90tDn
    +rd99mJZ0Qq1Ts4jI3B3bUWOE7n6FcxRe1MjwAV9nUu2Uh/fWqiiLLISYjLmRtTiZ
    +gF2z1NuVlgnvHaFvhsgXhvcKHnI7UIxT5c7UjM/MgT1GVf9lJQs2MTGmIidHllk4
    +wc1mppqDmNy4LhCNukWuqiBu4wu97OZjtUBV1P6XsfGNmsCiRo6j7aAb7UCwAKUo
    ++doDvcGpAgMBAAECggEAPTAXEFQVXe/ouaDV3HwHi0vSns67srF3QK/mFMt+e6uS
    +2G7mimMrTXPbMkcU3OkxtrbrLqqXYXP7K36LLkiwZcgpKkRIQYMg+RkaehtvCIwB
    +vWXe5Eefta2JMzBt3RjylGHsKaVGc/k9+whNZnmWOls3Xk4Ip7gfF39qaBOdSWLy
    +gVJJ9qbM6nHejl3vdSew5nRGlqGu0qonB+OhbM2GGSLEjH4BBbDlvI1Gl/mdKMLK
    +XMJf6WvO/d43+y+/EWmln2NF5lXHRwD+Gk84316mf+ANFy8V3yvp7BC9kDWqLeDH
    +A6wK9fmwPzwEWZKgzurN/Euz8gv1hEVcxH54HUBgVQKBgQD/iprN8phLUeA1VwJg
    +zm/AX6jM4GIEig0rXkYOOz1KI7wx6DunfnFINxSpDXf93cK7k6wtmLkxu0ONO9LE
    +1q4bmaBPkObfGOzBFixSOEE9ecyH9+EiFQi4O1zBqZLPObIY7tJ5xVsAmvQmwrf9
    +Agy6fKNGM1SFBWyHgmTUvtLrlwKBgQDMc4slLWyTZYZn7bpbuXslRBWSp3KROP8M
    +R5wCGe0xmOz2A/UEfGMruAS3/2+GJQlMpW25ACi3GX+Wq0OAlVxHZfArw9vaJSXm
    +Nj1rC0bkICKYHojS0kFckTHBGUEKOAaSdP1Ehm+eUaH0kVapfw9bSQ0+Yd5Z0XHS
    +65QqcY9kvwKBgFD8emc+tSlZv3boJmbLxfrv1i1oB2hs4BOYgxdLivcOMDyY3x8M
    +IZbDbhbNn/Oi7m5INM8WkcrDEHuYNAoSB4fTvky5HZIi8hWXk2BTV8nF6h5FXuJQ
    +TD0nAxSVS2PFYz4noijZdSfR9AK8v1a96Y7IpW5AIk8uEuE3YAFUoL/tAoGAMGXh
    +uIlKPJI6APw7s17zEd1OJgtRiaMubR++hJjSl30WCx7gr5EqgLztEQl8wwqdavF2
    +SecJvF5i363nKtcwow40joes0bUdhaOtYlunCnW4+r2vsghnxJvyZT2vMdYVaDId
    +ik0wuw+kARsuoq0bW4athejxE94Kzd1Kk8mSIk0CgYEAiBYxMU8c821HwRzHGUi4
    +MEqrOV7D0WrQIeeunT9yBZvP1/OpH9VcHXM53JOQLX/1bF3gGzLevOxvzhSid9Kg
    +7avc497uam9LfmIueIsc90yH6Qf83O2pwLc6x/Jx9cyVX4elMC9b+CpXPzC4RDla
    +NGH4KgEBAySS0x7x6a31/yg=
     -----END PRIVATE KEY-----
    diff --git a/tests/certificate-authority/server-keys/proxy.key.pem b/tests/certificate-authority/server-keys/proxy.key.pem
    index ec79e9ddf234c..17c431ba9f5ca 100644
    --- a/tests/certificate-authority/server-keys/proxy.key.pem
    +++ b/tests/certificate-authority/server-keys/proxy.key.pem
    @@ -1,27 +1,27 @@
     -----BEGIN RSA PRIVATE KEY-----
    -MIIEpgIBAAKCAQEA8+cGeQeoq9e6/sEo6hDycBruARUCPNEA2E7HZiTf8mk3X8Cy
    -HjxSW0+Me2inWRcuueMzcNqFC6iXPUD83mw3ZEah779j9oj8VEMXbnIbMSgz+TBk
    -OOZ6NCPYdvpiy6kj5hi0+vlkNJ4gG++VW6td9cmRl/kJdh4SOqvWkSkuk0Jmhag6
    -KILvf/QoGg5OhHn5qTD4NvZCvkVLPsC5ND0pRnUdk2XjFkG4SFrpq8RJ1HKQfZdD
    -MOyowFWu4HIdvM7WmD+iniI6GpDoMW7ru94qNbMwvEpHvH8DxQ531/0Pf6lpsGhT
    -vZs2qDBsoKcRGijdjSB7UHXHsY5FHsYetZIqVQIDAQABAoIBAQCfw7gwtXyeO+zo
    -jTywvjMgjfNTUCCsF7f2+PuShBsR3iwAk+fY+Gh2GZCLEg10xUO9Tf9XMDywW1ne
    -LOC3DannDIFs2zCuwCKRS3VgymvXcl9BHpq3zvlDiCt2HCN1BrFWOt7p+0zRTrb3
    -taNfbcxZgRTWuLGi5qXIOuGNKq2wlSnB7JU9U9O7M6ebPsHojMYLA9e36DUgRFy8
    -T+/tRfTT5Vroh4fkZdv7fitrY613iBDfXbb9vauL/YwkVOMoSLpwIyxL6zGAkhoV
    -xBmvjEZIF5Ez4YVTeZ1l+jNLg8ioCmiSQb/vx+kFY+F3rbm2HS4yCNC9qv73JKA+
    -SbGmH8+NAoGBAP+yA+Tlm6fzjgoytcGzAcisC1C+bR2qWSKGHTYP8Os442JdUzPX
    -tfKdZZTm/qD0xJPqfC4oJJJA7z6xGcwIQFN2d50VhpFTizWMnLknndvN1KdCppdP
    -L7ETRg2u/G5muBmlpyaTrj8eXwdRzZ446vV4WMyBABzuHU9nQEAdNNyTAoGBAPQx
    -adBZ37GbLgDC8ASSdPMh5+Ao6GKTKkwINyUnBI55tRCyQb6i2ER86h1XkilB07VQ
    -8GgKjHuN6hJ0NyLhUvnYSRnzl+NQn2MO1g2byHrj2REe1l1GNu8Joxm48eknSLg2
    -J2GAND90Uhpt1XBmami6FuzL3wwdMqWKVUleqRZ3AoGBAM+f3UmgqCRYwJ3HT49Z
    -q7km+ok2F6RfDsHZ5R5hE1/tcbYN0eHm5vd6l2GxsFFCb90sKmnQbuHTmHFvnWLv
    -0Ev1hmsnb33SONfiteAjKyuwl1y54kw7IJ7I57l0vcVn41XwDSpmALeAEen/otc3
    -oJJyhqiIl27mayZu0N0+2g3BAoGBAO7VwZuhu5M3prfh8eVW5YHDoMny2M9JBu6U
    -fnMdBWzghwLMhlcRaRsbXdPTCcbGdm15u5jW8G4L8GuaNS8tWP262EzEnsMa4Ll/
    -Ekb/HgdsAKupEJrsCOCk+Vw4MosK/CoH9V58iR2jBIaK9F5aHStPDwsNn5t2l0mn
    -0iDPr+ujAoGBAIhg3wstubXCOFKQG7/FoyNmOxFxNryUP5qjISs5nzBPBzMwb7s4
    -cABESYDx4dZTTiMB9S5xeJWUu46++QO/JYH04+mSjR6a+bOfuc3ZZbxcEOdUnMwl
    -J2b9H5W/0uUHnIqodGv/6XvRULy9/t3Kqb19efJYy/BWH3D0DgbiFHY8
    +MIIEowIBAAKCAQEAzBXJhQZDR71Gn08DGuBulBNOsDDqiMo65DmSEsF3UYwNPLkm
    +XC/c/LFavw5H/wlgMHmOVSb+0KHtn22KagaF8NDclKZUoabJPlfVaX3pJcHva3fh
    +YnbY5FSRQLwLEXS4MLvUAnfWvdLQ563ffZiWdEKtU7OIyNwd21FjhO5+hXMUXtTI
    +8AFfZ1LtlIf31qooiyyEmIy5kbU4mYBds9TblZYJ7x2hb4bIF4b3Ch5yO1CMU+XO
    +1IzPzIE9RlX/ZSULNjExpiInR5ZZOMHNZqaag5jcuC4QjbpFrqogbuMLvezmY7VA
    +VdT+l7HxjZrAokaOo+2gG+1AsAClKPnaA73BqQIDAQABAoIBAD0wFxBUFV3v6Lmg
    +1dx8B4tL0p7Ou7Kxd0Cv5hTLfnurkthu5opjK01z2zJHFNzpMba26y6ql2Fz+yt+
    +iy5IsGXIKSpESEGDIPkZGnobbwiMAb1l3uRHn7WtiTMwbd0Y8pRh7CmlRnP5PfsI
    +TWZ5ljpbN15OCKe4Hxd/amgTnUli8oFSSfamzOpx3o5d73UnsOZ0RpahrtKqJwfj
    +oWzNhhkixIx+AQWw5byNRpf5nSjCylzCX+lrzv3eN/svvxFppZ9jReZVx0cA/hpP
    +ON9epn/gDRcvFd8r6ewQvZA1qi3gxwOsCvX5sD88BFmSoM7qzfxLs/IL9YRFXMR+
    +eB1AYFUCgYEA/4qazfKYS1HgNVcCYM5vwF+ozOBiBIoNK15GDjs9SiO8Meg7p35x
    +SDcUqQ13/d3Cu5OsLZi5MbtDjTvSxNauG5mgT5Dm3xjswRYsUjhBPXnMh/fhIhUI
    +uDtcwamSzzmyGO7SecVbAJr0JsK3/QIMunyjRjNUhQVsh4Jk1L7S65cCgYEAzHOL
    +JS1sk2WGZ+26W7l7JUQVkqdykTj/DEecAhntMZjs9gP1BHxjK7gEt/9vhiUJTKVt
    +uQAotxl/lqtDgJVcR2XwK8Pb2iUl5jY9awtG5CAimB6I0tJBXJExwRlBCjgGknT9
    +RIZvnlGh9JFWqX8PW0kNPmHeWdFx0uuUKnGPZL8CgYBQ/HpnPrUpWb926CZmy8X6
    +79YtaAdobOATmIMXS4r3DjA8mN8fDCGWw24WzZ/zou5uSDTPFpHKwxB7mDQKEgeH
    +075MuR2SIvIVl5NgU1fJxeoeRV7iUEw9JwMUlUtjxWM+J6Io2XUn0fQCvL9WvemO
    +yKVuQCJPLhLhN2ABVKC/7QKBgDBl4biJSjySOgD8O7Ne8xHdTiYLUYmjLm0fvoSY
    +0pd9Fgse4K+RKoC87REJfMMKnWrxdknnCbxeYt+t5yrXMKMONI6HrNG1HYWjrWJb
    +pwp1uPq9r7IIZ8Sb8mU9rzHWFWgyHYpNMLsPpAEbLqKtG1uGrYXo8RPeCs3dSpPJ
    +kiJNAoGBAIgWMTFPHPNtR8EcxxlIuDBKqzlew9Fq0CHnrp0/cgWbz9fzqR/VXB1z
    +OdyTkC1/9Wxd4Bsy3rzsb84UonfSoO2r3OPe7mpvS35iLniLHPdMh+kH/NztqcC3
    +OsfycfXMlV+HpTAvW/gqVz8wuEQ5WjRh+CoBAQMkktMe8emt9f8o
     -----END RSA PRIVATE KEY-----
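
Note on the regenerated proxy key/cert above: the new certificate's subject is CN=proxy-localhost-SAN and it carries a Subject Alternative Name of DNS:localhost, IP Address:127.0.0.1, so TLS hostname verification can pass when integration tests connect to the proxy via localhost. As an illustrative sketch only (not part of this change; the trust-store path and port below are assumptions), a Java client exercising that verification might look like:

    import org.apache.pulsar.client.api.PulsarClient;

    public class ProxyTlsClientSketch {
        public static void main(String[] args) throws Exception {
            PulsarClient client = PulsarClient.builder()
                    // Assumes the default Pulsar TLS port on the local proxy.
                    .serviceUrl("pulsar+ssl://localhost:6651")
                    // Hypothetical path to the CA that signed the regenerated proxy cert.
                    .tlsTrustCertsFilePath("tests/certificate-authority/certs/ca.cert.pem")
                    .allowTlsInsecureConnection(false)
                    // Passes only because the new cert's SAN covers localhost / 127.0.0.1.
                    .enableTlsHostnameVerification(true)
                    .build();
            client.close();
        }
    }
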
    diff --git a/tests/docker-images/java-test-functions/pom.xml b/tests/docker-images/java-test-functions/pom.xml
    index db685d0394253..8625bbfc48bd1 100644
    --- a/tests/docker-images/java-test-functions/pom.xml
    +++ b/tests/docker-images/java-test-functions/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>docker-images</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    @@ -30,12 +30,12 @@
       Apache Pulsar :: Tests :: Docker Images :: Java Test Functions
       
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-io-core</artifactId>
            <version>${project.version}</version>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-functions-api</artifactId>
            <version>${project.version}</version>
         
    @@ -59,7 +59,6 @@
         
       
       jar
    -
       
         
           docker
    @@ -68,15 +67,13 @@
               integrationTests
             
           
    -
           
             
     -          <groupId>org.apache.pulsar</groupId>
     +          <groupId>io.streamnative</groupId>
                <artifactId>pulsar-functions-api-examples</artifactId>
                <version>${project.version}</version>
             
           
    -
           
             
               
    @@ -91,7 +88,7 @@
                     
                       
                         
    -                      org.apache.pulsar:pulsar-functions-api-examples
    +                      io.streamnative:pulsar-functions-api-examples
                         
                       
                     
    @@ -102,5 +99,4 @@
           
         
       
    -
     
    diff --git a/tests/docker-images/java-test-image/Dockerfile b/tests/docker-images/java-test-image/Dockerfile
    index 9e852050edf2e..5821e6eeaeae7 100644
    --- a/tests/docker-images/java-test-image/Dockerfile
    +++ b/tests/docker-images/java-test-image/Dockerfile
    @@ -17,7 +17,7 @@
     # under the License.
     #
     
    -FROM ubuntu:20.04
    +FROM ubuntu:22.04
     
     RUN groupadd -g 10001 pulsar
     RUN adduser -u 10000 --gid 10001 --disabled-login --disabled-password --gecos '' pulsar
    diff --git a/tests/docker-images/java-test-image/pom.xml b/tests/docker-images/java-test-image/pom.xml
    index 5154c4e5599e5..e063b11aad4a1 100644
    --- a/tests/docker-images/java-test-image/pom.xml
    +++ b/tests/docker-images/java-test-image/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>docker-images</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    @@ -29,7 +29,6 @@
       java-test-image
       Apache Pulsar :: Tests :: Docker Images :: Java Test Image
       pom
    -
       
         
           docker
    @@ -45,12 +44,12 @@
           
           
             
     -          <groupId>org.apache.pulsar.tests</groupId>
     +          <groupId>io.streamnative.tests</groupId>
                <artifactId>java-test-functions</artifactId>
                <version>${project.parent.version}</version>
             
             
     -          <groupId>org.apache.pulsar</groupId>
     +          <groupId>io.streamnative</groupId>
                <artifactId>pulsar-server-distribution</artifactId>
                <version>${project.parent.version}</version>
                <classifier>bin</classifier>
    @@ -78,7 +77,7 @@
                     
                       
                         
     -                      <groupId>org.apache.pulsar.tests</groupId>
     +                      <groupId>io.streamnative.tests</groupId>
                            <artifactId>java-test-functions</artifactId>
                            <version>${project.parent.version}</version>
                            <type>jar</type>
    @@ -87,7 +86,7 @@
                           java-test-functions.jar
                         
                         
     -                      <groupId>org.apache.pulsar</groupId>
     +                      <groupId>io.streamnative</groupId>
                            <artifactId>pulsar-server-distribution</artifactId>
                            <version>${project.parent.version}</version>
                            <classifier>bin</classifier>
    diff --git a/tests/docker-images/java-test-plugins/pom.xml b/tests/docker-images/java-test-plugins/pom.xml
    index b214c84bb6c36..f55aabad0f47e 100644
    --- a/tests/docker-images/java-test-plugins/pom.xml
    +++ b/tests/docker-images/java-test-plugins/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>docker-images</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    @@ -31,7 +31,7 @@
       jar
       
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-broker</artifactId>
            <version>${project.version}</version>
            <scope>provided</scope>
    @@ -45,5 +45,4 @@
           
         
       
    -
     
    diff --git a/tests/docker-images/latest-version-image/Dockerfile b/tests/docker-images/latest-version-image/Dockerfile
    index f0093fa1eafc5..0d0070081523d 100644
    --- a/tests/docker-images/latest-version-image/Dockerfile
    +++ b/tests/docker-images/latest-version-image/Dockerfile
    @@ -58,10 +58,6 @@ FROM apachepulsar/pulsar:latest
     # However, any processes exec'ing into the containers will run as root, by default.
     USER root
     
    -# We need to define the user in order for supervisord to work correctly
    -# We don't need a user defined in the public docker image, though.
    -RUN adduser -u 10000 --gid 0 --disabled-login --disabled-password --gecos '' pulsar
    -
     RUN rm -rf /var/lib/apt/lists/* && apt update
     
     RUN apt-get clean && apt-get update && apt-get install -y supervisor vim procps curl
    diff --git a/tests/docker-images/latest-version-image/conf/functions_worker.conf b/tests/docker-images/latest-version-image/conf/functions_worker.conf
    index 8072639a0d4a2..6feb660231cec 100644
    --- a/tests/docker-images/latest-version-image/conf/functions_worker.conf
    +++ b/tests/docker-images/latest-version-image/conf/functions_worker.conf
    @@ -22,7 +22,7 @@ autostart=false
     redirect_stderr=true
     stdout_logfile=/var/log/pulsar/functions_worker.log
     directory=/pulsar
    -environment=PULSAR_MEM="-Xmx128M",PULSAR_GC="-XX:+UseZGC"
    +environment=PULSAR_MEM="-Xmx128M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/pulsar/logs/functions",PULSAR_GC="-XX:+UseZGC"
     command=/pulsar/bin/pulsar functions-worker
     user=pulsar
     stopwaitsecs=15
    \ No newline at end of file
    diff --git a/tests/docker-images/latest-version-image/pom.xml b/tests/docker-images/latest-version-image/pom.xml
    index 8474f820c9566..769a2c972950b 100644
    --- a/tests/docker-images/latest-version-image/pom.xml
    +++ b/tests/docker-images/latest-version-image/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>docker-images</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    @@ -29,7 +29,6 @@
       latest-version-image
       Apache Pulsar :: Tests :: Docker Images :: Latest Version Testing
       pom
    -
       
         
           docker
    @@ -40,17 +39,17 @@
           
           
             
     -          <groupId>org.apache.pulsar.tests</groupId>
     +          <groupId>io.streamnative.tests</groupId>
                <artifactId>java-test-functions</artifactId>
                <version>${project.parent.version}</version>
             
             
     -          <groupId>org.apache.pulsar.tests</groupId>
     +          <groupId>io.streamnative.tests</groupId>
                <artifactId>java-test-plugins</artifactId>
                <version>${project.parent.version}</version>
             
             
     -          <groupId>org.apache.pulsar</groupId>
     +          <groupId>io.streamnative</groupId>
                <artifactId>pulsar-all-docker-image</artifactId>
                <version>${project.parent.version}</version>
                <type>pom</type>
    @@ -77,7 +76,7 @@
                     
                       
                         
     -                      <groupId>org.apache.pulsar.tests</groupId>
     +                      <groupId>io.streamnative.tests</groupId>
                            <artifactId>java-test-functions</artifactId>
                            <version>${project.parent.version}</version>
                            <type>jar</type>
    @@ -86,7 +85,7 @@
                           java-test-functions.jar
                         
                         
     -                      <groupId>org.apache.pulsar.tests</groupId>
     +                      <groupId>io.streamnative.tests</groupId>
                            <artifactId>java-test-plugins</artifactId>
                            <version>${project.parent.version}</version>
                            <type>jar</type>
    @@ -147,6 +146,7 @@
                     package
                     
                        <goal>build</goal>
     +                  <goal>tag</goal>
                     
                     
                       
    @@ -159,6 +159,11 @@
                               ${project.version}
                             
                             true
     +                        <buildx>
     +                          <platforms>
     +                            <platform>${docker.platforms}</platform>
     +                          </platforms>
     +                        </buildx>
                           
                         
                       
    diff --git a/tests/docker-images/pom.xml b/tests/docker-images/pom.xml
    index ade69e0778b04..fe29786515781 100644
    --- a/tests/docker-images/pom.xml
    +++ b/tests/docker-images/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    +
       pom
       4.0.0
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>tests-parent</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    diff --git a/tests/integration/pom.xml b/tests/integration/pom.xml
    index d9b817ca2e2b5..71d9d97b96f9c 100644
    --- a/tests/integration/pom.xml
    +++ b/tests/integration/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    +
       4.0.0
       
     -    <groupId>org.apache.pulsar.tests</groupId>
     +    <groupId>io.streamnative.tests</groupId>
          <artifactId>tests-parent</artifactId>
          <version>3.0.0-SNAPSHOT</version>
       
    -
       integration
       jar
       Apache Pulsar :: Tests :: Integration
    -
       
         pulsar.xml
         4.1.2
       
    -
       
         
           com.google.code.gson
    @@ -44,37 +40,37 @@
           test
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-functions-api-examples</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-broker</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-common</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-client-original</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-client-admin-original</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>managed-ledger</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
    @@ -96,102 +92,91 @@
           test
         
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-io-kafka</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
    -
         
           org.testcontainers
           mysql
           test
         
    -
         
           org.postgresql
           postgresql
           ${postgresql-jdbc.version}
           runtime
         
    -
         
           org.testcontainers
           postgresql
           test
         
    -
         
           com.github.docker-java
           docker-java-core
           test
         
    -
         
     -      <groupId>org.apache.pulsar</groupId>
     +      <groupId>org.bouncycastle</groupId>
     +      <artifactId>bcpkix-jdk18on</artifactId>
     +      <scope>test</scope>
     +    </dependency>
     +    <dependency>
     +      <groupId>io.streamnative</groupId>
            <artifactId>pulsar-io-jdbc-postgres</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
         
    -
         
           com.fasterxml.jackson.core
           jackson-databind
           test
         
    -
         
           com.fasterxml.jackson.dataformat
           jackson-dataformat-yaml
           test
         
    -
         
           org.opensearch.client
           opensearch-rest-high-level-client
           test
         
    -
         
           co.elastic.clients
           elasticsearch-java
           test
         
    -
         
           org.testcontainers
           elasticsearch
           test
         
    -
    -
         
           com.rabbitmq
           amqp-client
           ${rabbitmq-client.version}
           test
         
    -
         
           joda-time
           joda-time
           test
         
    -
         
           io.trino
           trino-jdbc
           ${trino.version}
           test
         
    -
         
           org.awaitility
           awaitility
           test
         
    -
    -
    +    
         
           org.testcontainers
           localstack
    @@ -208,7 +193,6 @@
           aws-java-sdk-core
           test
         
    -
         
         
           org.testcontainers
    @@ -221,9 +205,7 @@
           ${mongo-reactivestreams.version}
           test
         
    -
       
    -
       
         
           
    @@ -264,7 +246,6 @@
           
         
       
    -
       
         
           integrationTests
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/bookkeeper/BookkeeperInstallWithHttpServerEnabledTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/bookkeeper/BookkeeperInstallWithHttpServerEnabledTest.java
    new file mode 100644
    index 0000000000000..03d7f974ab39b
    --- /dev/null
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/bookkeeper/BookkeeperInstallWithHttpServerEnabledTest.java
    @@ -0,0 +1,84 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *   http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +package org.apache.pulsar.tests.integration.bookkeeper;
    +
    +import lombok.extern.slf4j.Slf4j;
    +import org.apache.pulsar.tests.integration.docker.ContainerExecResult;
    +import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
    +import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec;
    +import org.apache.pulsar.tests.integration.topologies.PulsarClusterTestBase;
    +import org.testng.annotations.AfterClass;
    +import org.testng.annotations.BeforeClass;
    +import org.testng.annotations.Test;
    +
    +import java.util.stream.Stream;
    +
    +import static java.util.stream.Collectors.joining;
    +import static org.testng.Assert.assertEquals;
    +
    +/**
    + * Test bookkeeper setup with http server enabled.
    + */
    +@Slf4j
    +public class BookkeeperInstallWithHttpServerEnabledTest extends PulsarClusterTestBase {
    +
    +    @BeforeClass(alwaysRun = true)
    +    @Override
    +    public final void setupCluster() throws Exception {
    +        incrementSetupNumber();
    +
    +        final String clusterName = Stream.of(this.getClass().getSimpleName(), randomName(5))
    +                .filter(s -> !s.isEmpty())
    +                .collect(joining("-"));
    +        bookkeeperEnvs.put("httpServerEnabled", "true");
    +        bookieAdditionalPorts.add(8000);
    +        PulsarClusterSpec spec = PulsarClusterSpec.builder()
    +                .numBookies(2)
    +                .numBrokers(1)
    +                .bookkeeperEnvs(bookkeeperEnvs)
    +                .bookieAdditionalPorts(bookieAdditionalPorts)
    +                .clusterName(clusterName)
    +                .build();
    +
    +        log.info("Setting up cluster {} with {} bookies, {} brokers",
    +                spec.clusterName(), spec.numBookies(), spec.numBrokers());
    +
    +        pulsarCluster = PulsarCluster.forSpec(spec);
    +        pulsarCluster.start();
    +
    +        log.info("Cluster {} is setup", spec.clusterName());
    +    }
    +
    +    @AfterClass(alwaysRun = true)
    +    @Override
    +    public final void tearDownCluster() throws Exception {
    +        super.tearDownCluster();
    +    }
    +
    +    @Test
    +    public void testBookieHttpServerIsRunning() throws Exception {
    +        ContainerExecResult result = pulsarCluster.getAnyBookie().execCmd(
    +                PulsarCluster.CURL,
    +                "-X",
    +                "GET",
    +                "http://localhost:8000/heartbeat");
    +        assertEquals(result.getExitCode(), 0);
    +        assertEquals(result.getStdout(), "OK\n");
    +    }
    +}
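
The new test above exposes the bookie HTTP server (httpServerEnabled=true, extra port 8000) and probes /heartbeat. A possible additional probe, sketched purely as an illustration (the /api/v1/bookie/is_ready path is an assumption about BookKeeper's standard HTTP endpoints, not something this change adds):

    // Illustrative sketch, not part of this diff; reuses the helpers from the test above.
    @Test
    public void testBookieIsReadyEndpoint() throws Exception {
        ContainerExecResult result = pulsarCluster.getAnyBookie().execCmd(
                PulsarCluster.CURL,
                "-X",
                "GET",
                "http://localhost:8000/api/v1/bookie/is_ready"); // assumed endpoint path
        assertEquals(result.getExitCode(), 0);
    }
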
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java
    index 7e8f55429244b..f13a4dcfbdceb 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java
    @@ -402,7 +402,7 @@ public void testSchemaCLI() throws Exception {
                 "upload",
                 topicName,
                 "-f",
    -            "/pulsar/conf/schema_example.conf"
    +            "/pulsar/conf/schema_example.json"
             );
             result.assertNoOutput();
     
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java
    index e0e85af024bc2..a51397050b97f 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java
    @@ -28,11 +28,20 @@ public class BrokerContainer extends PulsarContainer {
         public static final String NAME = "pulsar-broker";
     
         public BrokerContainer(String clusterName, String hostName) {
    -        super(clusterName, hostName, hostName, "bin/run-broker.sh", BROKER_PORT, BROKER_PORT_TLS,
    -                BROKER_HTTP_PORT, BROKER_HTTPS_PORT, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME);
    +        this(clusterName, hostName, false);
    +    }
    +
    +    public BrokerContainer(String clusterName, String hostName, boolean enableTls) {
    +        super(clusterName, hostName, hostName, "bin/run-broker.sh", BROKER_PORT,
    +                enableTls ? BROKER_PORT_TLS : 0, BROKER_HTTP_PORT,
    +                enableTls ? BROKER_HTTPS_PORT : 0, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME);
             tailContainerLog();
         }
     
    +    public String getHostName() {
    +        return super.hostname;
    +    }
    +
         @Override
         protected void afterStart() {
             DockerUtils.runCommandAsyncWithLogging(this.dockerClient, this.getContainerId(),
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java
    index 53283447378f5..f3926878f37c5 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java
    @@ -28,8 +28,13 @@ public class ProxyContainer extends PulsarContainer {
         public static final String NAME = "pulsar-proxy";
     
         public ProxyContainer(String clusterName, String hostName) {
    -        super(clusterName, hostName, hostName, "bin/run-proxy.sh", BROKER_PORT, BROKER_PORT_TLS, BROKER_HTTP_PORT,
    -                BROKER_HTTPS_PORT, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME);
    +        this(clusterName, hostName, false);
    +    }
    +
    +    public ProxyContainer(String clusterName, String hostName, boolean enableTls) {
    +        super(clusterName, hostName, hostName, "bin/run-proxy.sh", BROKER_PORT,
    +                enableTls ? BROKER_PORT_TLS : 0, BROKER_HTTP_PORT,
    +                enableTls ? BROKER_HTTPS_PORT : 0, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME);
         }
     
         @Override
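
The BrokerContainer and ProxyContainer changes above add an enableTls flag that passes 0 for the TLS ports when disabled, keeping the old two-argument constructors as the non-TLS default. A minimal usage sketch (illustrative only; cluster and host names are hypothetical):

    import org.apache.pulsar.tests.integration.containers.BrokerContainer;
    import org.apache.pulsar.tests.integration.containers.ProxyContainer;

    class TlsContainerUsageSketch {
        static void createContainers() {
            // TLS listener ports are only wired up when enableTls is true.
            BrokerContainer tlsBroker = new BrokerContainer("test-cluster", "pulsar-broker-0", true);
            // The two-argument constructor keeps the previous behaviour (TLS disabled).
            ProxyContainer plainProxy = new ProxyContainer("test-cluster", "pulsar-proxy");
        }
    }
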
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/PulsarContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/PulsarContainer.java
    index dc11acd00c3f2..56d64ce5b2c8e 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/PulsarContainer.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/PulsarContainer.java
    @@ -70,7 +70,7 @@ public abstract class PulsarContainer> exte
         public static final boolean PULSAR_CONTAINERS_LEAVE_RUNNING =
                 Boolean.parseBoolean(System.getenv("PULSAR_CONTAINERS_LEAVE_RUNNING"));
     
    -    private final String hostname;
    +    protected final String hostname;
         private final String serviceName;
         private final String serviceEntryPoint;
         private final int servicePort;
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java
    index eaf66974b982a..e6ba6acff83c0 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java
    @@ -24,15 +24,18 @@
     import static org.testng.Assert.assertFalse;
     import static org.testng.Assert.assertTrue;
     import static org.testng.Assert.fail;
    -
     import com.fasterxml.jackson.databind.JsonNode;
     import com.fasterxml.jackson.databind.ObjectMapper;
     import com.fasterxml.jackson.databind.node.ObjectNode;
    +import io.swagger.util.Json;
     import java.time.Duration;
    +import java.util.ArrayList;
    +import java.util.Arrays;
     import java.util.Collections;
     import java.util.HashMap;
     import java.util.HashSet;
     import java.util.LinkedHashMap;
    +import java.util.List;
     import java.util.Map;
     import java.util.Optional;
     import java.util.Set;
    @@ -40,8 +43,6 @@
     import java.util.concurrent.ConcurrentHashMap;
     import java.util.concurrent.TimeUnit;
     import java.util.concurrent.atomic.AtomicInteger;
    -
    -import io.swagger.util.Json;
     import lombok.Cleanup;
     import lombok.extern.slf4j.Slf4j;
     import org.apache.commons.lang3.StringUtils;
    @@ -74,8 +75,8 @@
     import org.apache.pulsar.common.util.ObjectMapperFactory;
     import org.apache.pulsar.functions.api.examples.AutoSchemaFunction;
     import org.apache.pulsar.functions.api.examples.AvroSchemaTestFunction;
    -import org.apache.pulsar.functions.api.examples.MergeTopicFunction;
     import org.apache.pulsar.functions.api.examples.InitializableFunction;
    +import org.apache.pulsar.functions.api.examples.MergeTopicFunction;
     import org.apache.pulsar.functions.api.examples.RecordFunction;
     import org.apache.pulsar.functions.api.examples.pojo.AvroTestObject;
     import org.apache.pulsar.functions.api.examples.pojo.Users;
    @@ -126,13 +127,14 @@ protected Map produceMessagesToInputTopic(String inputTopicName,
             return kvs;
         }
     
    -    protected void testFunctionLocalRun(Runtime runtime) throws  Exception {
    +    protected void testFunctionLocalRun(Runtime runtime) throws Exception {
             if (functionRuntimeType == FunctionRuntimeType.THREAD) {
                 return;
             }
     
     
    -        String inputTopicName = "persistent://public/default/test-function-local-run-" + runtime + "-input-" + randomName(8);
    +        String inputTopicName =
    +                "persistent://public/default/test-function-local-run-" + runtime + "-input-" + randomName(8);
             String outputTopicName = "test-function-local-run-" + runtime + "-output-" + randomName(8);
     
             final int numMessages = 10;
    @@ -377,7 +379,8 @@ protected void testFunctionNegAck(Runtime runtime) throws Exception {
     
             if (runtime == Runtime.PYTHON) {
                 submitFunction(
    -                    runtime, inputTopicName, outputTopicName, functionName, EXCEPTION_FUNCTION_PYTHON_FILE, EXCEPTION_PYTHON_CLASS, schema);
    +                    runtime, inputTopicName, outputTopicName, functionName, EXCEPTION_FUNCTION_PYTHON_FILE,
    +                    EXCEPTION_PYTHON_CLASS, schema);
             } else {
                 submitFunction(
                         runtime, inputTopicName, outputTopicName, functionName, null, EXCEPTION_JAVA_CLASS, schema);
    @@ -550,7 +553,7 @@ protected void testPublishFunction(Runtime runtime) throws Exception {
             final int numMessages = 10;
     
             // submit the exclamation function
    -        switch (runtime){
    +        switch (runtime) {
                 case JAVA:
                     submitFunction(
                             runtime,
    @@ -622,7 +625,8 @@ protected void testPublishFunction(Runtime runtime) throws Exception {
                         .create();
     
                 for (int i = 0; i < numMessages; i++) {
    -                producer.newMessage().key(String.valueOf(i)).property("count", String.valueOf(i)).value(("message-" + i).getBytes(UTF_8)).send();
    +                producer.newMessage().key(String.valueOf(i)).property("count", String.valueOf(i))
    +                        .value(("message-" + i).getBytes(UTF_8)).send();
                 }
     
                 Set expectedMessages = new HashSet<>();
    @@ -662,9 +666,10 @@ protected void testPublishFunction(Runtime runtime) throws Exception {
         protected void testExclamationFunction(Runtime runtime,
                                                boolean isTopicPattern,
                                                boolean pyZip,
    +                                           boolean multipleInput,
                                                boolean withExtraDeps) throws Exception {
    -        if (functionRuntimeType == FunctionRuntimeType.THREAD && runtime == Runtime.PYTHON) {
    -            // python can only run on process mode
    +        if (functionRuntimeType == FunctionRuntimeType.THREAD && (runtime == Runtime.PYTHON || runtime == Runtime.GO)) {
     +            // Python and Go functions can only run in process mode
                 return;
             }
     
    @@ -683,22 +688,9 @@ protected void testExclamationFunction(Runtime runtime,
                 admin.topics().createNonPartitionedTopic(outputTopicName);
             }
             if (isTopicPattern) {
    -            @Cleanup PulsarClient client = PulsarClient.builder()
    -                    .serviceUrl(pulsarCluster.getPlainTextServiceUrl())
    -                    .build();
    -
    -            @Cleanup Consumer consumer1 = client.newConsumer(schema)
    -                    .topic(inputTopicName + "1")
    -                    .subscriptionType(SubscriptionType.Exclusive)
    -                    .subscriptionName("test-sub")
    -                    .subscribe();
    -
    -            @Cleanup Consumer consumer2 = client.newConsumer(schema)
    -                    .topic(inputTopicName + "2")
    -                    .subscriptionType(SubscriptionType.Exclusive)
    -                    .subscriptionName("test-sub")
    -                    .subscribe();
                 inputTopicName = inputTopicName + ".*";
    +        } else if (multipleInput) {
    +            inputTopicName = inputTopicName + "1," + inputTopicName + "2";
             }
             String functionName = "test-exclamation-fn-" + randomName(8);
             final int numMessages = 10;
    @@ -725,8 +717,11 @@ protected void testExclamationFunction(Runtime runtime,
             // get function status
             getFunctionStatus(functionName, numMessages, true);
     
    -        // get function stats
    -        getFunctionStats(functionName, numMessages);
    +        if (Runtime.GO != runtime) {
    +            // TODO: Go runtime doesn't collect `process_latency_ms_1min` metric
    +            // get function stats
    +            getFunctionStats(functionName, numMessages);
    +        }
     
             // update parallelism
             updateFunctionParallelism(functionName, 2);
    @@ -734,6 +729,19 @@ protected void testExclamationFunction(Runtime runtime,
             //get function status
             getFunctionStatus(functionName, 0, true, 2);
     
    +        // update code file
    +        switch (runtime) {
    +            case JAVA:
    +                updateFunctionCodeFile(functionName, Runtime.JAVA, "test");
    +                break;
    +            case PYTHON:
    +                updateFunctionCodeFile(functionName, Runtime.PYTHON, EXCLAMATION_PYTHON_FILE);
    +                break;
    +            case GO:
    +                updateFunctionCodeFile(functionName, Runtime.GO, EXCLAMATION_GO_FILE);
    +                break;
    +        }
    +
             // delete function
             deleteFunction(functionName);
     
    @@ -787,6 +795,12 @@ private  void submitFunction(Runtime runtime,
                 } else {
                     file = EXCLAMATION_PYTHON_FILE;
                 }
    +        } else if (Runtime.GO == runtime) {
    +            if (isPublishFunction) {
    +                file = PUBLISH_FUNCTION_GO_FILE;
    +            } else {
    +                file = EXCLAMATION_GO_FILE;
    +            }
             }
     
             submitFunction(runtime, inputTopicName, outputTopicName, functionName, file, functionClass, inputTopicSchema);
    @@ -819,7 +833,7 @@ private  void submitFunction(Runtime runtime,
     
             if (StringUtils.isNotEmpty(inputTopicName)) {
                 ensureSubscriptionCreated(
    -                inputTopicName, String.format("public/default/%s", functionName), inputTopicSchema);
    +                    inputTopicName, String.format("public/default/%s", functionName), inputTopicSchema);
             }
     
             CommandGenerator generator;
    @@ -853,7 +867,7 @@ private  void submitFunction(Runtime runtime,
             }
             String command = "";
     
    -        switch (runtime){
    +        switch (runtime) {
                 case JAVA:
                     command = generator.generateCreateFunctionCommand();
                     break;
    @@ -893,12 +907,29 @@ private void updateFunctionParallelism(String functionName, int parallelism) thr
             assertTrue(result.getStdout().contains("Updated successfully"));
         }
     
    +    private void updateFunctionCodeFile(String functionName, Runtime runtime, String codeFile) throws Exception {
    +
    +        CommandGenerator generator = new CommandGenerator();
    +        generator.setFunctionName(functionName);
    +        generator.setRuntime(runtime);
    +        String command = generator.generateUpdateFunctionCommand(codeFile);
    +
    +        log.info("---------- Function command: {}", command);
    +        String[] commands = {
    +                "sh", "-c", command
    +        };
    +        ContainerExecResult result = pulsarCluster.getAnyWorker().execCmd(
    +                commands);
    +        assertTrue(result.getStdout().contains("Updated successfully"));
    +    }
    +
          protected <T> void submitFunction(Runtime runtime,
                                           String inputTopicName,
                                           String outputTopicName,
                                           String functionName,
                                           String functionFile,
                                           String functionClass,
     +                                      Map<String, String> inputSerdeClassNames,
                                           String outputSerdeClassName,
                                           Map userConfigs) throws Exception {
     
    @@ -913,6 +944,7 @@ protected  void submitFunction(Runtime runtime,
             }
             generator.setSinkTopic(outputTopicName);
             generator.setFunctionName(functionName);
    +        generator.setCustomSerDeSourceTopics(inputSerdeClassNames);
             generator.setOutputSerDe(outputSerdeClassName);
             if (userConfigs != null) {
                 generator.setUserConfig(userConfigs);
    @@ -945,8 +977,17 @@ private  void ensureSubscriptionCreated(String inputTopicName,
             try (PulsarClient client = PulsarClient.builder()
                     .serviceUrl(pulsarCluster.getPlainTextServiceUrl())
                     .build()) {
     +            List<String> topics = new ArrayList<>();
    +            if (inputTopicName.endsWith(".*")) {
    +                topics.add(inputTopicName.substring(0, inputTopicName.length() - 2) + "1");
    +                topics.add(inputTopicName.substring(0, inputTopicName.length() - 2) + "2");
    +            } else if (inputTopicName.contains(",")) {
    +                topics.addAll(Arrays.asList(inputTopicName.split(",")));
    +            } else {
    +                topics.add(inputTopicName);
    +            }
                 try (Consumer ignored = client.newConsumer(inputTopicSchema)
    -                    .topic(inputTopicName)
    +                    .topic(topics.toArray(new String[0]))
                         .subscriptionType(SubscriptionType.Shared)
                         .subscriptionName(subscriptionName)
                         .subscribe()) {
    @@ -1042,7 +1083,8 @@ private void getFunctionStats(String functionName, int numMessages) throws Excep
             assertEquals(functionStats.instances.get(0).getMetrics().getUserExceptionsTotal(), 0);
             assertTrue(functionStats.instances.get(0).getMetrics().getAvgProcessLatency() > 0);
             assertEquals(functionStats.instances.get(0).getMetrics().getOneMin().getReceivedTotal(), numMessages);
    -        assertEquals(functionStats.instances.get(0).getMetrics().getOneMin().getProcessedSuccessfullyTotal(), numMessages);
    +        assertEquals(functionStats.instances.get(0).getMetrics().getOneMin().getProcessedSuccessfullyTotal(),
    +                numMessages);
             assertEquals(functionStats.instances.get(0).getMetrics().getOneMin().getSystemExceptionsTotal(), 0);
             assertEquals(functionStats.instances.get(0).getMetrics().getOneMin().getUserExceptionsTotal(), 0);
             assertTrue(functionStats.instances.get(0).getMetrics().getOneMin().getAvgProcessLatency() > 0);
    @@ -1071,19 +1113,29 @@ private void getFunctionInfoNotFound(String functionName) throws Exception {
         }
     
         private void checkSubscriptionsCleanup(String topic) throws Exception {
    -        try {
    -            ContainerExecResult result = pulsarCluster.getAnyBroker().execCmd(
    -                    PulsarCluster.ADMIN_SCRIPT,
    -                    "topics",
    -                    "stats",
    -                    topic);
    -            TopicStats topicStats = ObjectMapperFactory.getMapper().reader()
    -                    .readValue(result.getStdout(), TopicStats.class);
    -            assertEquals(topicStats.getSubscriptions().size(), 0);
    -
    -        } catch (ContainerExecException e) {
    -            fail("Command should have exited with non-zero");
     +        List<String> topics = new ArrayList<>();
    +        if (topic.endsWith(".*")) {
    +            topics.add(topic.substring(0, topic.length() - 2) + "1");
    +            topics.add(topic.substring(0, topic.length() - 2) + "2");
    +        } else if (topic.contains(",")) {
    +            topics.addAll(Arrays.asList(topic.split(",")));
    +        } else {
    +            topics.add(topic);
             }
    +        topics.stream().forEach(t -> {
    +            try {
    +                ContainerExecResult result = pulsarCluster.getAnyBroker().execCmd(
    +                        PulsarCluster.ADMIN_SCRIPT,
    +                        "topics",
    +                        "stats",
    +                        t);
    +                TopicStats topicStats = ObjectMapperFactory.getMapper().reader()
    +                        .readValue(result.getStdout(), TopicStats.class);
    +                assertEquals(topicStats.getSubscriptions().size(), 0);
    +            } catch (Exception e) {
    +                fail("Command should have exited with non-zero");
    +            }
    +        });
         }
     
         private void checkPublisherCleanup(String topic) throws Exception {
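
The topic-expansion logic above (".*" pattern vs. comma-separated list vs. single topic) now appears in both ensureSubscriptionCreated and checkSubscriptionsCleanup. A possible consolidation, sketched only as a suggestion and not part of this change (the List/ArrayList/Arrays imports it needs are already added by this diff):

    // Hypothetical shared helper: expands a topic argument that may be a ".*"
    // pattern or a comma-separated list into the concrete topic names used above.
    private static List<String> expandTopics(String topic) {
        List<String> topics = new ArrayList<>();
        if (topic.endsWith(".*")) {
            String base = topic.substring(0, topic.length() - 2);
            topics.add(base + "1");
            topics.add(base + "2");
        } else if (topic.contains(",")) {
            topics.addAll(Arrays.asList(topic.split(",")));
        } else {
            topics.add(topic);
        }
        return topics;
    }
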
    @@ -1145,7 +1197,8 @@ private void doGetFunctionStatus(String functionName, int numMessages, boolean c
                 lastInvocationTimeGreaterThanZero = lastInvocationTimeGreaterThanZero
                         || functionStatus.getInstances().get(i).getStatus().getLastInvocationTime() > 0;
                 totalMessagesProcessed += functionStatus.getInstances().get(i).getStatus().getNumReceived();
    -            totalMessagesSuccessfullyProcessed += functionStatus.getInstances().get(i).getStatus().getNumSuccessfullyProcessed();
    +            totalMessagesSuccessfullyProcessed +=
    +                    functionStatus.getInstances().get(i).getStatus().getNumSuccessfullyProcessed();
                 if (checkRestarts) {
                     assertEquals(functionStatus.getInstances().get(i).getStatus().getNumRestarts(), 0);
                 }
    @@ -1238,6 +1291,23 @@ private void publishAndConsumeMessagesBytes(String inputTopic,
                     producer1.send(("message-" + i).getBytes(UTF_8));
                 }
     
    +            for (int i = numMessages / 2; i < numMessages; i++) {
    +                producer2.send(("message-" + i).getBytes(UTF_8));
    +            }
    +        } else if (inputTopic.contains(",")) {
    +            String[] topics = inputTopic.split(",");
     +            @Cleanup Producer<byte[]> producer1 = client.newProducer(Schema.BYTES)
    +                    .topic(topics[0])
    +                    .create();
    +
     +            @Cleanup Producer<byte[]> producer2 = client.newProducer(Schema.BYTES)
    +                    .topic(topics[1])
    +                    .create();
    +
    +            for (int i = 0; i < numMessages / 2; i++) {
    +                producer1.send(("message-" + i).getBytes(UTF_8));
    +            }
    +
                 for (int i = numMessages / 2; i < numMessages; i++) {
                     producer2.send(("message-" + i).getBytes(UTF_8));
                 }
    @@ -1420,7 +1490,8 @@ protected void testAvroSchemaFunction(Runtime runtime) throws Exception {
                         AVRO_SCHEMA_FUNCTION_PYTHON_FILE,
                         AVRO_SCHEMA_PYTHON_CLASS,
                         Schema.AVRO(AvroTestObject.class),
    -                    null, objectMapper.writeValueAsString(inputSpecs), "avro", null, "avro_schema_test_function.AvroTestObject", "avro_schema_test_function.AvroTestObject");
    +                    null, objectMapper.writeValueAsString(inputSpecs), "avro", null,
    +                    "avro_schema_test_function.AvroTestObject", "avro_schema_test_function.AvroTestObject");
             }
             log.info("pulsar submitFunction");
     
    @@ -1430,7 +1501,7 @@ protected void testAvroSchemaFunction(Runtime runtime) throws Exception {
             Set expectedSet = new HashSet<>();
     
             log.info("test-avro-schema producer connected: " + producer.isConnected());
    -        for (int i = 0 ; i < numMessages ; i++) {
    +        for (int i = 0; i < numMessages; i++) {
                 AvroTestObject inputObject = new AvroTestObject();
                 inputObject.setBaseValue(i);
                 MessageId messageId = producer.send(inputObject);
    @@ -1457,7 +1528,7 @@ protected void testAvroSchemaFunction(Runtime runtime) throws Exception {
             });
     
             log.info("test-avro-schema consumer connected: " + consumer.isConnected());
    -        for (int i = 0 ; i < numMessages ; i++) {
    +        for (int i = 0; i < numMessages; i++) {
                 log.info("test-avro-schema consumer receive [{}] start", i);
                 Message message = consumer.receive();
                 log.info("test-avro-schema consumer receive [{}] over", i);
    @@ -1495,7 +1566,8 @@ protected void testInitFunction(Runtime runtime) throws Exception {
             final int numMessages = 10;
     
             // submit the exclamation function
    -        submitFunction(runtime, inputTopicName, outputTopicName, functionName, null, InitializableFunction.class.getName(), schema,
    +        submitFunction(runtime, inputTopicName, outputTopicName, functionName, null,
    +                InitializableFunction.class.getName(), schema,
                     Collections.singletonMap("publish-topic", outputTopicName), null, null, null, null, null);
     
             // publish and consume result
    @@ -1650,7 +1722,8 @@ private void publishAndConsumeMessages(String inputTopic,
         }
     
     
    -    protected void testGenericObjectFunction(String function, boolean removeAgeField, boolean keyValue) throws Exception {
    +    protected void testGenericObjectFunction(String function, boolean removeAgeField, boolean keyValue)
    +            throws Exception {
             log.info("start {} function test ...", function);
     
             String ns = "public/ns-genericobject-" + randomName(8);
    @@ -1721,13 +1794,14 @@ protected void testGenericObjectFunction(String function, boolean removeAgeField
                         GenericRecord genericRecord = message.getValue();
                         if (keyValue) {
                             @SuppressWarnings("unchecked")
    -                        KeyValue keyValueObject = (KeyValue) genericRecord.getNativeObject();
     +                        KeyValue<GenericRecord, GenericRecord> keyValueObject =
     +                                (KeyValue<GenericRecord, GenericRecord>) genericRecord.getNativeObject();
                             GenericRecord key = keyValueObject.getKey();
                             GenericRecord value = keyValueObject.getValue();
    -                        key.getFields().forEach(f-> {
    +                        key.getFields().forEach(f -> {
                                 log.info("key field {} value {}", f.getName(), key.getField(f.getName()));
                             });
    -                        value.getFields().forEach(f-> {
    +                        value.getFields().forEach(f -> {
                                 log.info("value field {} value {}", f.getName(), value.getField(f.getName()));
                             });
                             assertEquals(i, key.getField("age"));
    @@ -1743,7 +1817,7 @@ protected void testGenericObjectFunction(String function, boolean removeAgeField
                         } else {
                             GenericRecord value = genericRecord;
                             log.info("received value {}", value);
    -                        value.getFields().forEach(f-> {
    +                        value.getFields().forEach(f -> {
                                 log.info("value field {} value {}", f.getName(), value.getField(f.getName()));
                             });
     
    @@ -1939,13 +2013,14 @@ private void prepareDataForMergeFunction(String ns,
         }
     
          private <T> void generateDataByDifferentSchema(String ns,
     -                                               String baseTopic,
     -                                               PulsarClient pulsarClient,
     -                                               Schema<T> schema,
     -                                               T data,
     -                                               int messageCnt,
     -                                               ObjectNode inputSpecNode,
     -                                               Map<String, Integer> topicMsgCntMap) throws PulsarClientException {
     +                                                   String baseTopic,
     +                                                   PulsarClient pulsarClient,
     +                                                   Schema<T> schema,
     +                                                   T data,
     +                                                   int messageCnt,
     +                                                   ObjectNode inputSpecNode,
     +                                                   Map<String, Integer> topicMsgCntMap)
     +            throws PulsarClientException {
              String topic = ns + "/" + baseTopic;
              Producer<T> producer = pulsarClient.newProducer(schema)
                     .topic(topic)
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTestBase.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTestBase.java
    index 033e590d6dd4e..288ced63ae5eb 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTestBase.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTestBase.java
    @@ -54,7 +54,7 @@ public abstract class PulsarFunctionsTestBase extends PulsarTestSuite {
         public static final String SERDE_JAVA_CLASS =
                 "org.apache.pulsar.functions.api.examples.CustomBaseToBaseFunction";
     
    -    public static final String SERDE_OUTPUT_CLASS =
    +    public static final String SERDE_CLASS =
                 "org.apache.pulsar.functions.api.examples.CustomBaseSerde";
     
         public static final String EXCLAMATION_PYTHON_CLASS =
    @@ -151,6 +151,8 @@ protected static String getExclamationClass(Runtime runtime,
                 } else {
                     return EXCLAMATION_PYTHON_CLASS;
                 }
    +        } else if (Runtime.GO == runtime) {
    +            return null;
             } else {
                 throw new IllegalArgumentException("Unsupported runtime : " + runtime);
             }
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/go/PulsarFunctionsGoTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/go/PulsarFunctionsGoTest.java
    index 9ef2398c55c00..ad8dc475aac20 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/go/PulsarFunctionsGoTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/go/PulsarFunctionsGoTest.java
    @@ -34,4 +34,9 @@ public void testGoFunctionLocalRun() throws Exception {
             testFunctionLocalRun(Runtime.GO);
         }
     
    +    @Test(groups = {"go_function", "function"})
    +    public void testGoExclamationMultiInputsFunction() throws Exception {
    +        testExclamationFunction(Runtime.GO, false, false, true, false);
    +    }
    +
     }
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarFunctionsJavaTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarFunctionsJavaTest.java
    index e6f3c67ebcdb7..939d6e19d1fb1 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarFunctionsJavaTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarFunctionsJavaTest.java
    @@ -19,9 +19,9 @@
     package org.apache.pulsar.tests.integration.functions.java;
     
     import static org.testng.Assert.assertEquals;
    -
     import java.util.Collections;
    -
    +import java.util.Map;
    +import org.apache.commons.collections4.map.HashedMap;
     import org.apache.pulsar.client.admin.PulsarAdmin;
     import org.apache.pulsar.common.policies.data.FunctionStatus;
     import org.apache.pulsar.common.policies.data.FunctionStatusUtil;
    @@ -70,10 +70,13 @@ private void testCustomSerdeFunction() throws Exception {
                 admin.topics().createNonPartitionedTopic(outputTopicName);
             }
     
     +        Map<String, String> inputTopicsSerde = new HashedMap<>();
    +        inputTopicsSerde.put(inputTopicName, SERDE_CLASS);
    +
             String functionName = "test-serde-fn-" + randomName(8);
             submitFunction(
    -                Runtime.JAVA, inputTopicName, outputTopicName, functionName, null, SERDE_JAVA_CLASS,
    -                SERDE_OUTPUT_CLASS, Collections.singletonMap("serde-topic", outputTopicName)
    +                Runtime.JAVA, inputTopicName, outputTopicName, functionName, null, SERDE_JAVA_CLASS, inputTopicsSerde,
    +                SERDE_CLASS, Collections.singletonMap("serde-topic", outputTopicName)
             );
     
             // get function info
    @@ -98,12 +101,12 @@ private void testCustomSerdeFunction() throws Exception {
     
         @Test(groups = {"java_function", "function"})
         public void testJavaExclamationFunction() throws Exception {
    -        testExclamationFunction(Runtime.JAVA, false, false, false);
    +        testExclamationFunction(Runtime.JAVA, false, false, false, false);
         }
     
         @Test(groups = {"java_function", "function"})
         public void testJavaExclamationTopicPatternFunction() throws Exception {
    -        testExclamationFunction(Runtime.JAVA, true, false, false);
    +        testExclamationFunction(Runtime.JAVA, true, false, false, false);
         }
     
         @Test(groups = {"java_function", "function"})
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarWorkerRebalanceDrainTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarWorkerRebalanceDrainTest.java
    index a6f54349f2689..de29cbc94c295 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarWorkerRebalanceDrainTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/java/PulsarWorkerRebalanceDrainTest.java
    @@ -18,6 +18,9 @@
      */
     package org.apache.pulsar.tests.integration.functions.java;
     
    +import static org.testng.Assert.assertEquals;
    +import static org.testng.Assert.assertTrue;
    +import com.fasterxml.jackson.databind.MappingIterator;
     import java.io.IOException;
     import java.util.ArrayList;
     import java.util.Arrays;
    @@ -26,10 +29,9 @@
     import java.util.HashMap;
     import java.util.List;
     import java.util.Map;
    -
    -import com.fasterxml.jackson.databind.MappingIterator;
     import lombok.extern.slf4j.Slf4j;
     import lombok.val;
    +import org.apache.commons.collections4.map.HashedMap;
     import org.apache.pulsar.client.admin.PulsarAdmin;
     import org.apache.pulsar.common.functions.WorkerInfo;
     import org.apache.pulsar.common.policies.data.FunctionStatus;
    @@ -41,8 +43,6 @@
     import org.apache.pulsar.tests.integration.topologies.FunctionRuntimeType;
     import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
     import org.testng.annotations.Test;
    -import static org.testng.Assert.assertEquals;
    -import static org.testng.Assert.assertTrue;
     
     @Slf4j
     public abstract class PulsarWorkerRebalanceDrainTest extends PulsarFunctionsTest {
    @@ -251,9 +251,12 @@ private void createFunctionWorker(String functionName, String topicPrefix) throw
                 admin.topics().createNonPartitionedTopic(outputTopicName);
             }
     
     +        Map<String, String> inputTopicsSerde = new HashedMap<>();
    +        inputTopicsSerde.put(inputTopicName, SERDE_CLASS);
    +
             submitFunction(
    -                Runtime.JAVA, inputTopicName, outputTopicName, functionName, null, SERDE_JAVA_CLASS,
    -                SERDE_OUTPUT_CLASS, Collections.singletonMap(topicPrefix, outputTopicName)
    +                Runtime.JAVA, inputTopicName, outputTopicName, functionName, null, SERDE_JAVA_CLASS, inputTopicsSerde,
    +                SERDE_CLASS, Collections.singletonMap(topicPrefix, outputTopicName)
             );
         }
     
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/python/PulsarFunctionsPythonTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/python/PulsarFunctionsPythonTest.java
    index 1c75d70498609..87a52d27e8927 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/python/PulsarFunctionsPythonTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/python/PulsarFunctionsPythonTest.java
    @@ -46,22 +46,22 @@ public void testPythonPublishFunction() throws Exception {
     
         @Test(groups = {"python_function", "function"})
         public void testPythonExclamationFunction() throws Exception {
    -        testExclamationFunction(Runtime.PYTHON, false, false, false);
    +        testExclamationFunction(Runtime.PYTHON, false, false, false, false);
         }
     
         @Test(groups = {"python_function", "function"})
         public void testPythonExclamationFunctionWithExtraDeps() throws Exception {
    -        testExclamationFunction(Runtime.PYTHON, false, false, true);
    +        testExclamationFunction(Runtime.PYTHON, false, false, false, true);
         }
     
         @Test(groups = {"python_function", "function"})
         public void testPythonExclamationZipFunction() throws Exception {
    -        testExclamationFunction(Runtime.PYTHON, false, true, false);
    +        testExclamationFunction(Runtime.PYTHON, false, true, false, false);
         }
     
         @Test(groups = {"python_function", "function"})
         public void testPythonExclamationTopicPatternFunction() throws Exception {
    -        testExclamationFunction(Runtime.PYTHON, true, false, false);
    +        testExclamationFunction(Runtime.PYTHON, true, false, false, false);
         }
     
         @Test(groups = {"python_function", "function"})
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/utils/CommandGenerator.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/utils/CommandGenerator.java
    index 90fac7a055268..adc791fab4dc8 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/utils/CommandGenerator.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/utils/CommandGenerator.java
    @@ -46,7 +46,7 @@ public enum Runtime {
         private String functionClassName;
         private String sourceTopic;
         private String sourceTopicPattern;
     -    private Map<String, String> customSereSourceTopics;
     +    private Map<String, String> customSerDeSourceTopics;
         private String sinkTopic;
         private String logTopic;
         private String outputSerDe;
    @@ -182,8 +182,8 @@ public String generateCreateFunctionCommand(String codeFile) {
             if (batchBuilder != null) {
                 commandBuilder.append("--batch-builder" + batchBuilder);
             }
    -        if (customSereSourceTopics != null && !customSereSourceTopics.isEmpty()) {
    -            commandBuilder.append(" --customSerdeInputs \'" + new Gson().toJson(customSereSourceTopics) + "\'");
    +        if (customSerDeSourceTopics != null && !customSerDeSourceTopics.isEmpty()) {
    +            commandBuilder.append(" --customSerdeInputs \'" + new Gson().toJson(customSerDeSourceTopics) + "\'");
             }
             if (sinkTopic != null) {
                 commandBuilder.append(" --output " + sinkTopic);
    @@ -280,8 +280,8 @@ public String generateUpdateFunctionCommand(String codeFile) {
             if (StringUtils.isNotEmpty(sourceTopic)) {
                 commandBuilder.append(" --inputs " + sourceTopic);
             }
    -        if (customSereSourceTopics != null && !customSereSourceTopics.isEmpty()) {
    -            commandBuilder.append(" --customSerdeInputs \'" + new Gson().toJson(customSereSourceTopics) + "\'");
    +        if (customSerDeSourceTopics != null && !customSerDeSourceTopics.isEmpty()) {
    +            commandBuilder.append(" --customSerdeInputs \'" + new Gson().toJson(customSerDeSourceTopics) + "\'");
             }
             if (batchBuilder != null) {
                 commandBuilder.append("--batch-builder" + batchBuilder);
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch7SinkTester.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch7SinkTester.java
    index 65b38c677bfc5..d99fcad252706 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch7SinkTester.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch7SinkTester.java
    @@ -19,7 +19,6 @@
     package org.apache.pulsar.tests.integration.io.sinks;
     
     import java.util.Optional;
    -import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
     import org.testcontainers.elasticsearch.ElasticsearchContainer;
     
     public class ElasticSearch7SinkTester extends ElasticSearchSinkTester {
    @@ -32,8 +31,9 @@ public ElasticSearch7SinkTester(boolean schemaEnable) {
             super(schemaEnable);
         }
     
    +
         @Override
    -    protected ElasticsearchContainer createSinkService(PulsarCluster cluster) {
    +    protected ElasticsearchContainer createElasticContainer() {
             return new ElasticsearchContainer(ELASTICSEARCH_7)
                     .withEnv("ES_JAVA_OPTS", "-Xms128m -Xmx256m");
         }
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch8SinkTester.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch8SinkTester.java
    index bb52c4ff03fea..8e7617a82a5b9 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch8SinkTester.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearch8SinkTester.java
    @@ -19,13 +19,12 @@
     package org.apache.pulsar.tests.integration.io.sinks;
     
     import java.util.Optional;
    -import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
     import org.testcontainers.elasticsearch.ElasticsearchContainer;
     
     public class ElasticSearch8SinkTester extends ElasticSearchSinkTester {
     
         public static final String ELASTICSEARCH_8 = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE_V8"))
    -            .orElse("docker.elastic.co/elasticsearch/elasticsearch:8.5.1");
    +            .orElse("docker.elastic.co/elasticsearch/elasticsearch:8.5.3");
     
     
         public ElasticSearch8SinkTester(boolean schemaEnable) {
    @@ -33,7 +32,7 @@ public ElasticSearch8SinkTester(boolean schemaEnable) {
         }
     
         @Override
    -    protected ElasticsearchContainer createSinkService(PulsarCluster cluster) {
    +    protected ElasticsearchContainer createElasticContainer() {
             return new ElasticsearchContainer(ELASTICSEARCH_8)
                     .withEnv("ES_JAVA_OPTS", "-Xms128m -Xmx256m")
                     .withEnv("xpack.security.enabled", "false")
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearchSinkTester.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearchSinkTester.java
    index 546dd1b9113ab..0784055d29003 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearchSinkTester.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/ElasticSearchSinkTester.java
    @@ -19,15 +19,6 @@
     package org.apache.pulsar.tests.integration.io.sinks;
     
     import static org.testng.Assert.assertTrue;
    -
    -import java.util.Arrays;
    -import java.util.HashMap;
    -import java.util.HashSet;
    -import java.util.LinkedHashMap;
    -import java.util.List;
    -import java.util.Map;
    -import java.util.Set;
    -
     import co.elastic.clients.elasticsearch.ElasticsearchClient;
     import co.elastic.clients.elasticsearch.core.SearchRequest;
     import co.elastic.clients.elasticsearch.core.SearchResponse;
    @@ -35,6 +26,13 @@
     import co.elastic.clients.transport.ElasticsearchTransport;
     import co.elastic.clients.transport.rest_client.RestClientTransport;
     import com.google.common.collect.ImmutableMap;
    +import java.util.Arrays;
    +import java.util.HashMap;
    +import java.util.HashSet;
    +import java.util.LinkedHashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Set;
     import lombok.AllArgsConstructor;
     import lombok.Cleanup;
     import lombok.Data;
    @@ -46,6 +44,7 @@
     import org.apache.pulsar.common.schema.KeyValue;
     import org.apache.pulsar.common.schema.KeyValueEncodingType;
     import org.apache.pulsar.common.util.ObjectMapperFactory;
    +import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
     import org.awaitility.Awaitility;
     import org.elasticsearch.client.RestClient;
     import org.elasticsearch.client.RestClientBuilder;
    @@ -100,6 +99,29 @@ public ElasticSearchSinkTester(boolean schemaEnable) {
             }
         }
     
    +    @Override
    +    protected final ElasticsearchContainer createSinkService(PulsarCluster cluster) {
    +        ElasticsearchContainer elasticContainer = createElasticContainer();
    +        configureElasticContainer(elasticContainer);
    +        return elasticContainer;
    +    }
    +
    +    protected void configureElasticContainer(ElasticsearchContainer elasticContainer) {
    +        if (!isOpenSearch()) {
    +            elasticContainer.withEnv("ingest.geoip.downloader.enabled", "false");
    +        }
    +
    +        // allow disk to fill up beyond default 90% threshold
    +        elasticContainer.withEnv("cluster.routing.allocation.disk.threshold_enabled", "false");
    +
    +        elasticContainer.withLogConsumer(o -> log.info("elastic> {}", o.getUtf8String()));
    +    }
    +
    +    protected boolean isOpenSearch() {
    +        return false;
    +    }
    +
    +    protected abstract ElasticsearchContainer createElasticContainer();
     
         @Override
         public void prepareSink() throws Exception {
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/OpenSearchSinkTester.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/OpenSearchSinkTester.java
    index 1e10cc4189c1a..75f0fdac6f90c 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/OpenSearchSinkTester.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sinks/OpenSearchSinkTester.java
    @@ -18,9 +18,10 @@
      */
     package org.apache.pulsar.tests.integration.io.sinks;
     
    +import static org.testng.Assert.assertTrue;
    +import java.util.Map;
     import java.util.Optional;
     import org.apache.http.HttpHost;
    -import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
     import org.awaitility.Awaitility;
     import org.opensearch.action.search.SearchRequest;
     import org.opensearch.action.search.SearchResponse;
    @@ -31,10 +32,6 @@
     import org.testcontainers.elasticsearch.ElasticsearchContainer;
     import org.testcontainers.utility.DockerImageName;
     
    -import java.util.Map;
    -
    -import static org.testng.Assert.assertTrue;
    -
     public class OpenSearchSinkTester extends ElasticSearchSinkTester {
     
         public static final String OPENSEARCH = Optional.ofNullable(System.getenv("OPENSEARCH_IMAGE"))
    @@ -48,7 +45,7 @@ public OpenSearchSinkTester(boolean schemaEnable) {
         }
     
         @Override
    -    protected ElasticsearchContainer createSinkService(PulsarCluster cluster) {
    +    protected ElasticsearchContainer createElasticContainer() {
             DockerImageName dockerImageName = DockerImageName.parse(OPENSEARCH)
                     .asCompatibleSubstituteFor("docker.elastic.co/elasticsearch/elasticsearch");
             return new ElasticsearchContainer(dockerImageName)
    @@ -57,6 +54,10 @@ protected ElasticsearchContainer createSinkService(PulsarCluster cluster) {
                     .withEnv("plugins.security.disabled", "true");
         }
     
    +    protected boolean isOpenSearch() {
    +        return true;
    +    }
    +
         @Override
         public void prepareSink() throws Exception {
             RestClientBuilder builder = RestClient.builder(
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java
    new file mode 100644
    index 0000000000000..af14ef97f85c3
    --- /dev/null
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java
    @@ -0,0 +1,457 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *   http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +package org.apache.pulsar.tests.integration.loadbalance;
    +
    +import static org.apache.pulsar.tests.integration.containers.PulsarContainer.BROKER_HTTP_PORT;
    +import static org.assertj.core.api.Assertions.assertThat;
    +import static org.testng.Assert.assertEquals;
    +import static org.testng.Assert.assertFalse;
    +import static org.testng.Assert.assertNotEquals;
    +import static org.testng.Assert.assertTrue;
    +import static org.testng.Assert.fail;
    +import com.google.common.collect.Sets;
    +import java.util.ArrayList;
    +import java.util.Collection;
    +import java.util.HashMap;
    +import java.util.HashSet;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Random;
    +import java.util.Set;
    +import java.util.UUID;
    +import java.util.concurrent.CopyOnWriteArrayList;
    +import java.util.concurrent.CountDownLatch;
    +import java.util.concurrent.Executors;
    +import java.util.concurrent.TimeUnit;
    +import java.util.regex.Matcher;
    +import java.util.regex.Pattern;
    +import lombok.extern.slf4j.Slf4j;
    +import org.apache.pulsar.client.admin.PulsarAdmin;
    +import org.apache.pulsar.client.admin.PulsarAdminException;
    +import org.apache.pulsar.common.policies.data.AutoFailoverPolicyData;
    +import org.apache.pulsar.common.policies.data.AutoFailoverPolicyType;
    +import org.apache.pulsar.common.policies.data.BundlesData;
    +import org.apache.pulsar.common.policies.data.FailureDomain;
    +import org.apache.pulsar.common.policies.data.NamespaceIsolationData;
    +import org.apache.pulsar.common.policies.data.TenantInfoImpl;
    +import org.apache.pulsar.tests.TestRetrySupport;
    +import org.apache.pulsar.tests.integration.containers.BrokerContainer;
    +import org.apache.pulsar.tests.integration.topologies.PulsarCluster;
    +import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec;
    +import org.awaitility.Awaitility;
    +import org.testng.annotations.AfterClass;
    +import org.testng.annotations.BeforeClass;
    +import org.testng.annotations.BeforeMethod;
    +import org.testng.annotations.Test;
    +
    +
    +/**
    + * Integration tests for Pulsar ExtensibleLoadManagerImpl.
    + */
    +@Slf4j
    +public class ExtensibleLoadManagerTest extends TestRetrySupport {
    +
    +    private static final int NUM_BROKERS = 3;
    +    private static final String DEFAULT_TENANT = "my-tenant";
    +    private static final String DEFAULT_NAMESPACE = DEFAULT_TENANT + "/my-namespace";
    +    private static final String nsSuffix = "-anti-affinity-enabled";
    +
    +    private final String clusterName = "MultiLoadManagerTest-" + UUID.randomUUID();
    +    private final PulsarClusterSpec spec = PulsarClusterSpec.builder()
    +            .clusterName(clusterName)
    +            .numBrokers(NUM_BROKERS).build();
    +    private PulsarCluster pulsarCluster = null;
     +    private List<String> brokerUrls = null;
    +    private String hosts;
    +    private PulsarAdmin admin;
    +
    +    @BeforeClass(alwaysRun = true)
    +    public void setup() throws Exception {
    +        incrementSetupNumber();
     +        Map<String, String> brokerEnvs = new HashMap<>();
    +        brokerEnvs.put("loadManagerClassName",
    +                "org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl");
    +        brokerEnvs.put("loadBalancerLoadSheddingStrategy",
    +                "org.apache.pulsar.broker.loadbalance.extensions.scheduler.TransferShedder");
    +        brokerEnvs.put("forceDeleteNamespaceAllowed", "true");
    +        brokerEnvs.put("loadBalancerDebugModeEnabled", "true");
    +        brokerEnvs.put("PULSAR_MEM", "-Xmx512M");
    +        spec.brokerEnvs(brokerEnvs);
    +        pulsarCluster = PulsarCluster.forSpec(spec);
    +        pulsarCluster.start();
    +        brokerUrls = brokerUrls();
    +
    +        hosts = pulsarCluster.getAllBrokersHttpServiceUrl();
    +        admin = PulsarAdmin.builder().serviceHttpUrl(hosts).build();
    +        // all brokers alive
    +        assertEquals(admin.brokers().getActiveBrokers(clusterName).size(), NUM_BROKERS);
    +
    +        admin.tenants().createTenant(DEFAULT_TENANT,
    +                new TenantInfoImpl(new HashSet<>(), Set.of(pulsarCluster.getClusterName())));
    +        admin.namespaces().createNamespace(DEFAULT_NAMESPACE, 100);
    +    }
    +
    +    @AfterClass(alwaysRun = true)
    +    public void cleanup() {
    +        markCurrentSetupNumberCleaned();
    +        if (pulsarCluster != null) {
    +            pulsarCluster.stop();
    +            pulsarCluster = null;
    +        }
    +        if (admin != null) {
    +            admin.close();
    +            admin = null;
    +        }
    +    }
    +
    +    @BeforeMethod(alwaysRun = true)
    +    public void startBroker() {
    +        if (pulsarCluster != null) {
    +            pulsarCluster.getBrokers().forEach(brokerContainer -> {
    +                if (!brokerContainer.isRunning()) {
    +                    brokerContainer.start();
    +                }
    +            });
    +            String topicName = "persistent://" + DEFAULT_NAMESPACE + "/startBrokerCheck";
    +            Awaitility.await().atMost(120, TimeUnit.SECONDS).ignoreExceptions().until(
    +                    () -> {
    +                        for (BrokerContainer brokerContainer : pulsarCluster.getBrokers()) {
    +                            try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(
    +                                    brokerContainer.getHttpServiceUrl()).build()) {
    +                                if (admin.brokers().getActiveBrokers(clusterName).size() != NUM_BROKERS) {
    +                                    return false;
    +                                }
    +                                try {
    +                                    admin.topics().createPartitionedTopic(topicName, 10);
    +                                } catch (PulsarAdminException.ConflictException e) {
    +                                    // expected
    +                                }
    +                                admin.lookups().lookupPartitionedTopic(topicName);
    +                            }
    +                        }
    +                        return true;
    +                    }
    +            );
    +        }
    +    }
    +
    +    @Test(timeOut = 40 * 1000)
    +    public void testConcurrentLookups() throws Exception {
    +        String topicName = "persistent://" + DEFAULT_NAMESPACE + "/testConcurrentLookups";
     +        List<PulsarAdmin> admins = new ArrayList<>();
    +        int numAdminForBroker = 10;
    +        for (String url : brokerUrls) {
    +            for (int i = 0; i < numAdminForBroker; i++) {
    +                admins.add(PulsarAdmin.builder().serviceHttpUrl(url).build());
    +            }
    +        }
    +
    +        admin.topics().createPartitionedTopic(topicName, 100);
    +
    +        var executor = Executors.newFixedThreadPool(admins.size());
    +
    +        CountDownLatch latch = new CountDownLatch(admins.size());
     +        List<Map<String, String>> result = new CopyOnWriteArrayList<>();
     +        for (var admin : admins) {
    +            executor.execute(() -> {
    +                try {
    +                    result.add(admin.lookups().lookupPartitionedTopic(topicName));
    +                } catch (PulsarAdminException e) {
    +                    log.error("Lookup partitioned topic failed.", e);
    +                }
    +                latch.countDown();
    +            });
    +        }
    +        latch.await();
    +
    +        assertEquals(result.size(), admins.size());
    +
    +        for (int i = 1; i < admins.size(); i++) {
    +            assertEquals(result.get(i - 1), result.get(i));
    +        }
    +        admins.forEach(a -> a.close());
    +        executor.shutdown();
    +    }
    +
    +    @Test(timeOut = 30 * 1000)
    +    public void testTransferAdminApi() throws Exception {
    +        String topicName = "persistent://" + DEFAULT_NAMESPACE + "/testUnloadAdminApi";
    +        admin.topics().createNonPartitionedTopic(topicName);
    +        String broker = admin.lookups().lookupTopic(topicName);
    +
    +        int index = extractBrokerIndex(broker);
    +
    +        String bundleRange = admin.lookups().getBundleRange(topicName);
    +
    +        // Test transfer to current broker.
    +        try {
    +            admin.namespaces().unloadNamespaceBundle(DEFAULT_NAMESPACE, bundleRange, getBrokerUrl(index));
    +            fail();
    +        } catch (PulsarAdminException ex) {
    +            assertTrue(ex.getMessage().contains("cannot be transfer to same broker"));
    +        }
    +
    +        int transferToIndex = generateRandomExcludingX(NUM_BROKERS, index);
    +        assertNotEquals(transferToIndex, index);
    +        String transferTo = getBrokerUrl(transferToIndex);
    +        admin.namespaces().unloadNamespaceBundle(DEFAULT_NAMESPACE, bundleRange, transferTo);
    +
    +        broker = admin.lookups().lookupTopic(topicName);
    +
    +        index = extractBrokerIndex(broker);
    +        assertEquals(index, transferToIndex);
    +    }
    +
    +    @Test(timeOut = 30 * 1000)
    +    public void testSplitBundleAdminApi() throws Exception {
    +        String topicName = "persistent://" + DEFAULT_NAMESPACE + "/testSplitBundleAdminApi";
    +        admin.topics().createNonPartitionedTopic(topicName);
    +        String broker = admin.lookups().lookupTopic(topicName);
    +        log.info("The topic: {} owned by {}", topicName, broker);
    +        BundlesData bundles = admin.namespaces().getBundles(DEFAULT_NAMESPACE);
    +        int numBundles = bundles.getNumBundles();
    +        var bundleRanges = bundles.getBoundaries().stream().map(Long::decode).sorted().toList();
    +        String firstBundle = bundleRanges.get(0) + "_" + bundleRanges.get(1);
    +        admin.namespaces().splitNamespaceBundle(DEFAULT_NAMESPACE, firstBundle, true, null);
    +        BundlesData bundlesData = admin.namespaces().getBundles(DEFAULT_NAMESPACE);
    +
    +        long mid = bundleRanges.get(0) + (bundleRanges.get(1) - bundleRanges.get(0)) / 2;
    +
    +        assertEquals(bundlesData.getNumBundles(), numBundles + 1);
    +        String lowBundle = String.format("0x%08x", bundleRanges.get(0));
    +        String midBundle = String.format("0x%08x", mid);
    +        String highBundle = String.format("0x%08x", bundleRanges.get(1));
    +        assertTrue(bundlesData.getBoundaries().contains(lowBundle));
    +        assertTrue(bundlesData.getBoundaries().contains(midBundle));
    +        assertTrue(bundlesData.getBoundaries().contains(highBundle));
    +
    +        // Test split bundle with invalid bundle range.
    +        try {
    +            admin.namespaces().splitNamespaceBundle(DEFAULT_NAMESPACE, "invalid", true, null);
    +            fail();
    +        } catch (PulsarAdminException ex) {
    +            assertTrue(ex.getMessage().contains("Invalid bundle range"));
    +        }
    +    }
    +
    +    @Test(timeOut = 30 * 1000)
    +    public void testDeleteNamespace() throws Exception {
    +        String namespace = DEFAULT_TENANT + "/test-delete-namespace";
    +        String topicName = "persistent://" + namespace + "/test-delete-namespace-topic";
    +        admin.namespaces().createNamespace(namespace);
    +        admin.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(clusterName));
    +        assertTrue(admin.namespaces().getNamespaces(DEFAULT_TENANT).contains(namespace));
    +        admin.topics().createPartitionedTopic(topicName, 2);
    +        String broker = admin.lookups().lookupTopic(topicName);
    +        log.info("The topic: {} owned by: {}", topicName, broker);
    +        admin.namespaces().deleteNamespace(namespace, true);
    +        assertFalse(admin.namespaces().getNamespaces(DEFAULT_TENANT).contains(namespace));
    +    }
    +
    +    @Test(timeOut = 120 * 1000)
    +    public void testStopBroker() throws Exception {
    +        String topicName = "persistent://" + DEFAULT_NAMESPACE + "/test-stop-broker-topic";
    +
    +        admin.topics().createNonPartitionedTopic(topicName);
    +        String broker = admin.lookups().lookupTopic(topicName);
    +        log.info("The topic: {} owned by: {}", topicName, broker);
    +
    +        int idx = extractBrokerIndex(broker);
    +        for (BrokerContainer container : pulsarCluster.getBrokers()) {
    +            String name = container.getHostName();
    +            if (name.contains(String.valueOf(idx))) {
    +                container.stop();
    +            }
    +        }
    +
    +        Awaitility.waitAtMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(() -> {
    +            String broker1 = admin.lookups().lookupTopic(topicName);
    +            assertNotEquals(broker1, broker);
    +        });
    +
    +    }
    +
    +    @Test(timeOut = 40 * 1000)
    +    public void testAntiaffinityPolicy() throws PulsarAdminException {
    +        final String namespaceAntiAffinityGroup = "my-anti-affinity-filter";
    +        final String antiAffinityEnabledNameSpace = DEFAULT_TENANT + "/my-ns-filter" + nsSuffix;
    +        final int numPartition = 20;
    +
     +        List<String> activeBrokers = admin.brokers().getActiveBrokers();
    +
    +        assertEquals(activeBrokers.size(), NUM_BROKERS);
    +
    +        for (int i = 0; i < activeBrokers.size(); i++) {
    +            String namespace = antiAffinityEnabledNameSpace + "-" + i;
    +            admin.namespaces().createNamespace(namespace, 10);
    +            admin.namespaces().setNamespaceAntiAffinityGroup(namespace, namespaceAntiAffinityGroup);
    +            admin.clusters().createFailureDomain(clusterName, namespaceAntiAffinityGroup, FailureDomain.builder()
    +                    .brokers(Set.of(activeBrokers.get(i))).build());
    +        }
    +
     +        Set<String> result = new HashSet<>();
    +        for (int i = 0; i < activeBrokers.size(); i++) {
     +            final String topic = "persistent://" + antiAffinityEnabledNameSpace + "-" + i + "/topic";
    +            admin.topics().createPartitionedTopic(topic, numPartition);
    +
     +            Map<String, String> topicToBroker = admin.lookups().lookupPartitionedTopic(topic);
    +
    +            assertEquals(topicToBroker.size(), numPartition);
    +
     +            HashSet<String> brokers = new HashSet<>(topicToBroker.values());
    +
    +            assertEquals(brokers.size(), 1);
    +            result.add(brokers.iterator().next());
    +            log.info("Topic: {}, lookup result: {}", topic, brokers.iterator().next());
    +        }
    +
    +        assertEquals(result.size(), NUM_BROKERS);
    +    }
    +
    +    @Test(timeOut = 300 * 1000)
    +    public void testIsolationPolicy() throws Exception {
    +        final String namespaceIsolationPolicyName = "my-isolation-policy";
    +        final String isolationEnabledNameSpace = DEFAULT_TENANT + "/my-isolation-policy" + nsSuffix;
     +        Map<String, String> parameters1 = new HashMap<>();
    +        parameters1.put("min_limit", "1");
    +        parameters1.put("usage_threshold", "100");
    +
    +        Awaitility.await().atMost(10, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(
    +                () -> {
     +                    List<String> activeBrokers = admin.brokers().getActiveBrokersAsync()
    +                            .get(5, TimeUnit.SECONDS);
    +                    assertEquals(activeBrokers.size(), NUM_BROKERS);
    +                }
    +        );
    +        try {
    +            admin.namespaces().createNamespace(isolationEnabledNameSpace);
    +        } catch (PulsarAdminException.ConflictException e) {
    +            //expected when retried
    +        }
    +
    +        try {
    +            admin.clusters()
    +                    .createNamespaceIsolationPolicy(clusterName, namespaceIsolationPolicyName, NamespaceIsolationData
    +                            .builder()
    +                            .namespaces(List.of(isolationEnabledNameSpace))
    +                            .autoFailoverPolicy(AutoFailoverPolicyData.builder()
    +                                    .policyType(AutoFailoverPolicyType.min_available)
    +                                    .parameters(parameters1)
    +                                    .build())
    +                            .primary(List.of(getHostName(0)))
    +                            .secondary(List.of(getHostName(1)))
    +                            .build());
    +        } catch (PulsarAdminException.ConflictException e) {
    +            //expected when retried
    +        }
    +
    +        final String topic = "persistent://" + isolationEnabledNameSpace + "/topic";
    +        try {
    +            admin.topics().createNonPartitionedTopic(topic);
    +        } catch (PulsarAdminException.ConflictException e) {
    +            //expected when retried
    +        }
    +
    +        String broker = admin.lookups().lookupTopic(topic);
    +        assertEquals(extractBrokerIndex(broker), 0);
    +
    +        for (BrokerContainer container : pulsarCluster.getBrokers()) {
    +            String name = container.getHostName();
    +            if (name.contains("0")) {
    +                container.stop();
    +            }
    +        }
    +
    +        Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(
    +                () -> {
     +                    List<String> activeBrokers = admin.brokers().getActiveBrokersAsync()
    +                            .get(5, TimeUnit.SECONDS);
    +                    assertEquals(activeBrokers.size(), 2);
    +                }
    +        );
    +
    +        Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(() -> {
    +            String ownerBroker = admin.lookups().lookupTopicAsync(topic).get(5, TimeUnit.SECONDS);
    +            assertEquals(extractBrokerIndex(ownerBroker), 1);
    +        });
    +
    +        for (BrokerContainer container : pulsarCluster.getBrokers()) {
    +            String name = container.getHostName();
    +            if (name.contains("1")) {
    +                container.stop();
    +            }
    +        }
    +
    +        Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(
    +            () -> {
     +                List<String> activeBrokers = admin.brokers().getActiveBrokersAsync().get(5, TimeUnit.SECONDS);
    +                assertEquals(activeBrokers.size(), 1);
    +            }
    +        );
    +
    +        Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(
    +                () -> {
    +                    try {
    +                        admin.lookups().lookupTopicAsync(topic).get(5, TimeUnit.SECONDS);
    +                    } catch (Exception ex) {
    +                        log.error("Failed to lookup topic: ", ex);
    +                        assertThat(ex.getMessage()).contains("Failed to select the new owner broker for bundle");
    +                    }
    +                }
    +        );
    +    }
    +
    +    private String getBrokerUrl(int index) {
    +        return String.format("pulsar-broker-%d:%d", index, BROKER_HTTP_PORT);
    +    }
    +
    +    private String getHostName(int index) {
    +        return String.format("pulsar-broker-%d", index);
    +    }
    +
    +    private int extractBrokerIndex(String broker) {
    +        String pattern = "pulsar://.*-(\\d+):\\d+";
    +        Pattern compiledPattern = Pattern.compile(pattern);
    +        Matcher matcher = compiledPattern.matcher(broker);
     +        if (!matcher.find()) {
    +            throw new IllegalArgumentException("Failed to extract broker index");
    +        }
    +        return Integer.parseInt(matcher.group(1));
    +    }
    +
    +    private int generateRandomExcludingX(int n, int x) {
    +        Random random = new Random();
    +        int randomNumber;
    +
    +        do {
    +            randomNumber = random.nextInt(n);
    +        } while (randomNumber == x);
    +
    +        return randomNumber;
    +    }
    +
     +    private List<String> brokerUrls() {
     +        Collection<BrokerContainer> brokers = pulsarCluster.getBrokers();
     +        List<String> brokerUrls = new ArrayList<>(NUM_BROKERS);
    +        brokers.forEach(broker -> {
    +            brokerUrls.add("http://" + broker.getHost() + ":" + broker.getMappedPort(BROKER_HTTP_PORT));
    +        });
    +        return brokerUrls;
    +    }
    +}
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java
    index 0a9bb5e19592a..87db46f2bb625 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java
    @@ -68,6 +68,7 @@ protected void beforeStartCluster() {
             envMap.put("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(secretKey));
             envMap.put("superUserRoles", "admin");
             envMap.put("brokerDeleteInactiveTopicsEnabled", "false");
    +        envMap.put("topicLevelPoliciesEnabled", "false");
     
             for (BrokerContainer brokerContainer : pulsarCluster.getBrokers()) {
                 brokerContainer.withEnv(envMap);
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java
    index 8bb6de74c661d..d0421063b2d90 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java
    @@ -31,6 +31,8 @@
     import org.apache.pulsar.client.api.schema.SchemaDefinition;
     import org.apache.pulsar.common.naming.TopicDomain;
     import org.apache.pulsar.common.naming.TopicName;
    +import org.apache.pulsar.common.schema.SchemaInfo;
    +import org.apache.pulsar.common.schema.SchemaType;
     import org.apache.pulsar.tests.integration.schema.Schemas.Person;
     import org.apache.pulsar.tests.integration.schema.Schemas.PersonConsumeSchema;
     import org.apache.pulsar.tests.integration.schema.Schemas.Student;
    @@ -316,5 +318,14 @@ public void testPrimitiveSchemaTypeCompatibilityCheck() {
     
         }
     
    +    @Test
    +    public void testDeletePartitionedTopicWhenTopicReferenceIsNotReady() throws Exception {
    +        final String topic = "persistent://public/default/tp-ref";
    +        admin.topics().createPartitionedTopic(topic, 20);
    +        admin.schemas().createSchema(topic,
    +                SchemaInfo.builder().type(SchemaType.STRING).schema(new byte[0]).build());
    +        admin.topics().deletePartitionedTopic(topic, false);
    +    }
    +
     }
     
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java
    index 59ff978cafa06..080912cd49262 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java
    @@ -29,6 +29,7 @@
     import org.apache.pulsar.client.api.PulsarClient;
     import org.apache.pulsar.client.api.PulsarClientException;
     import org.apache.pulsar.tests.integration.suites.PulsarTestSuite;
    +import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec;
     import org.testng.annotations.DataProvider;
     import org.testng.annotations.Test;
     
    @@ -41,6 +42,14 @@ private static String loadCertificateAuthorityFile(String name) {
             return Resources.getResource("certificate-authority/" + name).getPath();
         }
     
    +    @Override
    +    protected PulsarClusterSpec.PulsarClusterSpecBuilder beforeSetupCluster(
    +            String clusterName,
    +            PulsarClusterSpec.PulsarClusterSpecBuilder specBuilder) {
    +        specBuilder.enableTls(true);
    +        return specBuilder;
    +    }
    +
         @DataProvider(name = "adminUrls")
         public Object[][] adminUrls() {
             return new Object[][]{
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java
    index fcc0feec6d44f..c648f6968e24c 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java
    @@ -38,6 +38,7 @@
     import lombok.Getter;
     import lombok.extern.slf4j.Slf4j;
     import org.apache.commons.io.IOUtils;
    +import org.apache.pulsar.client.impl.auth.AuthenticationTls;
     import org.apache.pulsar.tests.integration.containers.BKContainer;
     import org.apache.pulsar.tests.integration.containers.BrokerContainer;
     import org.apache.pulsar.tests.integration.containers.CSContainer;
    @@ -132,14 +133,16 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s
             this.brokerContainers = Maps.newTreeMap();
             this.workerContainers = Maps.newTreeMap();
     
    -        this.proxyContainer = new ProxyContainer(appendClusterName("pulsar-proxy"), ProxyContainer.NAME)
    +        this.proxyContainer = new ProxyContainer(appendClusterName("pulsar-proxy"), ProxyContainer.NAME, spec.enableTls)
                     .withNetwork(network)
                     .withNetworkAliases(appendClusterName("pulsar-proxy"))
                     .withEnv("zkServers", appendClusterName(ZKContainer.NAME))
                     .withEnv("zookeeperServers", appendClusterName(ZKContainer.NAME))
                     .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT)
    -                .withEnv("clusterName", clusterName)
    +                .withEnv("clusterName", clusterName);
                     // enable mTLS
    +        if (spec.enableTls) {
    +            proxyContainer
                     .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT))
                     .withEnv("servicePortTls", String.valueOf(BROKER_PORT_TLS))
                     .withEnv("forwardAuthorizationCredentials", "true")
    @@ -147,7 +150,15 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s
                     .withEnv("tlsAllowInsecureConnection", "false")
                     .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/proxy.cert.pem")
                     .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/proxy.key-pk8.pem")
    -                .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem");
    +                .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem")
    +                .withEnv("brokerClientAuthenticationPlugin", AuthenticationTls.class.getName())
    +                .withEnv("brokerClientAuthenticationParameters", String.format("tlsCertFile:%s,tlsKeyFile:%s", "/pulsar/certificate-authority/client-keys/admin.cert.pem", "/pulsar/certificate-authority/client-keys/admin.key-pk8.pem"))
    +                .withEnv("tlsEnabledWithBroker", "true")
    +                .withEnv("brokerClientTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem")
    +                .withEnv("brokerClientCertificateFilePath", "/pulsar/certificate-authority/server-keys/proxy.cert.pem")
    +                .withEnv("brokerClientKeyFilePath", "/pulsar/certificate-authority/server-keys/proxy.key-pk8.pem");
    +
    +        }
             if (spec.proxyEnvs != null) {
                 spec.proxyEnvs.forEach(this.proxyContainer::withEnv);
             }
    @@ -157,24 +168,34 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s
     
             // create bookies
             bookieContainers.putAll(
    -                runNumContainers("bookie", spec.numBookies(), (name) -> new BKContainer(clusterName, name)
    -                        .withNetwork(network)
    -                        .withNetworkAliases(appendClusterName(name))
    -                        .withEnv("zkServers", appendClusterName(ZKContainer.NAME))
    -                        .withEnv("useHostNameAsBookieID", "true")
    -                        // Disable fsyncs for tests since they're slow within the containers
    -                        .withEnv("journalSyncData", "false")
    -                        .withEnv("journalMaxGroupWaitMSec", "0")
    -                        .withEnv("clusterName", clusterName)
    -                        .withEnv("diskUsageThreshold", "0.99")
    -                        .withEnv("nettyMaxFrameSizeBytes", "" + spec.maxMessageSize)
    -                )
    +                runNumContainers("bookie", spec.numBookies(), (name) -> {
    +                    BKContainer bookieContainer = new BKContainer(clusterName, name)
    +                            .withNetwork(network)
    +                            .withNetworkAliases(appendClusterName(name))
    +                            .withEnv("zkServers", appendClusterName(ZKContainer.NAME))
    +                            .withEnv("useHostNameAsBookieID", "true")
    +                            // Disable fsyncs for tests since they're slow within the containers
    +                            .withEnv("journalSyncData", "false")
    +                            .withEnv("journalMaxGroupWaitMSec", "0")
    +                            .withEnv("clusterName", clusterName)
    +                            .withEnv("PULSAR_PREFIX_diskUsageWarnThreshold", "0.95")
    +                            .withEnv("diskUsageThreshold", "0.99")
    +                            .withEnv("PULSAR_PREFIX_diskUsageLwmThreshold", "0.97")
    +                            .withEnv("nettyMaxFrameSizeBytes", String.valueOf(spec.maxMessageSize));
    +                    if (spec.bookkeeperEnvs != null) {
    +                        bookieContainer.withEnv(spec.bookkeeperEnvs);
    +                    }
    +                    if (spec.bookieAdditionalPorts != null) {
    +                        spec.bookieAdditionalPorts.forEach(bookieContainer::addExposedPort);
    +                    }
    +                    return bookieContainer;
    +                })
             );
     
             // create brokers
             brokerContainers.putAll(
                 runNumContainers("broker", spec.numBrokers(), (name) -> {
    -                BrokerContainer brokerContainer = new BrokerContainer(clusterName, appendClusterName(name))
    +                BrokerContainer brokerContainer = new BrokerContainer(clusterName, appendClusterName(name), spec.enableTls)
                             .withNetwork(network)
                             .withNetworkAliases(appendClusterName(name))
                             .withEnv("zkServers", appendClusterName(ZKContainer.NAME))
    @@ -185,16 +206,19 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s
                             .withEnv("loadBalancerOverrideBrokerNicSpeedGbps", "1")
                             // used in s3 tests
                             .withEnv("AWS_ACCESS_KEY_ID", "accesskey").withEnv("AWS_SECRET_KEY", "secretkey")
    -                        .withEnv("maxMessageSize", "" + spec.maxMessageSize)
    +                        .withEnv("maxMessageSize", "" + spec.maxMessageSize);
    +                    if (spec.enableTls) {
                             // enable mTLS
    -                        .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT))
    -                        .withEnv("brokerServicePortTls", String.valueOf(BROKER_PORT_TLS))
    -                        .withEnv("authenticateOriginalAuthData", "true")
    -                        .withEnv("tlsRequireTrustedClientCertOnConnect", "true")
    -                        .withEnv("tlsAllowInsecureConnection", "false")
    -                        .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/broker.cert.pem")
    -                        .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/broker.key-pk8.pem")
    -                        .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem");
    +                        brokerContainer
    +                            .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT))
    +                            .withEnv("brokerServicePortTls", String.valueOf(BROKER_PORT_TLS))
    +                            .withEnv("authenticateOriginalAuthData", "true")
    +                            .withEnv("tlsAllowInsecureConnection", "false")
    +                            .withEnv("tlsRequireTrustedClientCertOnConnect", "true")
    +                            .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem")
    +                            .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/broker.cert.pem")
    +                            .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/broker.key-pk8.pem");
    +                    }
                         if (spec.queryLastMessage) {
                             brokerContainer.withEnv("bookkeeperExplicitLacIntervalInMills", "10");
                             brokerContainer.withEnv("bookkeeperUseV2WireProtocol", "false");
    @@ -501,37 +525,18 @@ public synchronized void setupFunctionWorkers(String suffix, FunctionRuntimeType
         }
     
         private void startFunctionWorkersWithProcessContainerFactory(String suffix, int numFunctionWorkers) {
    -        String serviceUrl = "pulsar://pulsar-broker-0:" + PulsarContainer.BROKER_PORT;
    -        String httpServiceUrl = "http://pulsar-broker-0:" + PulsarContainer.BROKER_HTTP_PORT;
             workerContainers.putAll(runNumContainers(
                 "functions-worker-process-" + suffix,
                 numFunctionWorkers,
    -            (name) -> new WorkerContainer(clusterName, name)
    -                .withNetwork(network)
    -                .withNetworkAliases(name)
    -                // worker settings
    -                .withEnv("PF_workerId", name)
    -                .withEnv("PF_workerHostname", name)
    -                .withEnv("PF_workerPort", "" + PulsarContainer.BROKER_HTTP_PORT)
    -                .withEnv("PF_pulsarFunctionsCluster", clusterName)
    -                .withEnv("PF_pulsarServiceUrl", serviceUrl)
    -                .withEnv("PF_pulsarWebServiceUrl", httpServiceUrl)
    -                // script
    -                .withEnv("clusterName", clusterName)
    -                .withEnv("zookeeperServers", ZKContainer.NAME)
    -                // bookkeeper tools
    -                .withEnv("zkServers", ZKContainer.NAME)
    +            (name) -> createWorkerContainer(name)
             ));
             this.startWorkers();
         }
     
    -    private void startFunctionWorkersWithThreadContainerFactory(String suffix, int numFunctionWorkers) {
    +    private WorkerContainer createWorkerContainer(String name) {
             String serviceUrl = "pulsar://pulsar-broker-0:" + PulsarContainer.BROKER_PORT;
             String httpServiceUrl = "http://pulsar-broker-0:" + PulsarContainer.BROKER_HTTP_PORT;
    -        workerContainers.putAll(runNumContainers(
    -            "functions-worker-thread-" + suffix,
    -            numFunctionWorkers,
    -            (name) -> new WorkerContainer(clusterName, name)
    +        return new WorkerContainer(clusterName, name)
                     .withNetwork(network)
                     .withNetworkAliases(name)
                     // worker settings
    @@ -541,13 +546,21 @@ private void startFunctionWorkersWithThreadContainerFactory(String suffix, int n
                     .withEnv("PF_pulsarFunctionsCluster", clusterName)
                     .withEnv("PF_pulsarServiceUrl", serviceUrl)
                     .withEnv("PF_pulsarWebServiceUrl", httpServiceUrl)
    -                .withEnv("PF_functionRuntimeFactoryClassName", "org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory")
    -                .withEnv("PF_functionRuntimeFactoryConfigs_threadGroupName", "pf-container-group")
                     // script
                     .withEnv("clusterName", clusterName)
                     .withEnv("zookeeperServers", ZKContainer.NAME)
                     // bookkeeper tools
    -                .withEnv("zkServers", ZKContainer.NAME)
    +                .withEnv("zkServers", ZKContainer.NAME);
    +    }
    +
    +    private void startFunctionWorkersWithThreadContainerFactory(String suffix, int numFunctionWorkers) {
    +        workerContainers.putAll(runNumContainers(
    +                "functions-worker-thread-" + suffix,
    +                numFunctionWorkers,
    +                (name) -> createWorkerContainer(name)
    +                        .withEnv("PF_functionRuntimeFactoryClassName",
    +                                "org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory")
    +                        .withEnv("PF_functionRuntimeFactoryConfigs_threadGroupName", "pf-container-group")
             ));
             this.startWorkers();
         }
    @@ -740,4 +753,8 @@ public void dumpFunctionLogs(String name) {
         private String appendClusterName(String name) {
             return sharedCsContainer ? clusterName + "-" + name : name;
         }
    +
    +    public BKContainer getAnyBookie() {
    +        return getAnyContainer(bookieContainers, "bookie");
    +    }
     }
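Note on the new getAnyBookie() accessor added above: a minimal usage sketch follows, assuming it is called from an integration test that already holds a started PulsarCluster. execInContainer is the stock Testcontainers API; the shell command and path are illustrative only.

    // Hypothetical test fragment (the enclosing test method declares the checked
    // exceptions thrown by execInContainer).
    BKContainer bookie = cluster.getAnyBookie();
    org.testcontainers.containers.Container.ExecResult result =
            bookie.execInContainer("df", "-h", "/pulsar/data/bookkeeper");
    if (result.getExitCode() != 0) {
        throw new IllegalStateException("df failed on bookie: " + result.getStderr());
    }
    System.out.println("bookie disk usage:\n" + result.getStdout());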
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java
    index 385af99a6644b..c141e990d62e0 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java
    @@ -150,6 +150,11 @@ public class PulsarClusterSpec {
          */
    Map<String, String> brokerEnvs;
     
    +    /**
    +     * Specify envs for bookkeeper.
    +     */
+    Map<String, String> bookkeeperEnvs;
    +
         /**
          * Specify mount files.
          */
    @@ -167,4 +172,15 @@ public class PulsarClusterSpec {
          * Additional ports to expose on broker containers.
          */
    List<Integer> brokerAdditionalPorts;
    +
    +    /**
    +     * Additional ports to expose on bookie containers.
    +     */
+    List<Integer> bookieAdditionalPorts;
    +
    +    /**
+     * Enable TLS for connections.
    +     */
    +    @Default
    +    boolean enableTls = false;
     }
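For context, a hedged sketch of how a test might populate the new spec fields, assuming the Lombok-style builder implied by the @Default annotation above; PulsarCluster.forSpec and any builder method names not shown in this diff are assumptions, and the values are illustrative.

    import java.util.List;
    import java.util.Map;

    // Inside a test setup method: the new fields flow into the bookie and broker
    // container wiring shown earlier in this diff.
    PulsarClusterSpec spec = PulsarClusterSpec.builder()
            .clusterName("bk-tls-test")
            .numBookies(2)
            .enableTls(true)                                         // turns on the broker mTLS env block
            .bookkeeperEnvs(Map.of("journalMaxGroupWaitMSec", "0"))  // applied to every BKContainer via withEnv
            .bookieAdditionalPorts(List.of(4181))                    // each port exposed with addExposedPort
            .build();
    PulsarCluster cluster = PulsarCluster.forSpec(spec);
    cluster.start();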
    diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterTestBase.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterTestBase.java
    index d7a1906ec582e..ae9e44fa98254 100644
    --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterTestBase.java
    +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterTestBase.java
    @@ -34,8 +34,10 @@
     @Slf4j
     public abstract class PulsarClusterTestBase extends PulsarTestBase {
     protected final Map<String, String> brokerEnvs = new HashMap<>();
+    protected final Map<String, String> bookkeeperEnvs = new HashMap<>();
     protected final Map<String, String> proxyEnvs = new HashMap<>();
     protected final List<Integer> brokerAdditionalPorts = new LinkedList<>();
+    protected final List<Integer> bookieAdditionalPorts = new LinkedList<>();
     
         @Override
         protected final void setup() throws Exception {
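A brief sketch of how a concrete test class could use the new protected collections; since setup() is final, the fields are filled in an earlier TestNG hook. The class name, hook, and env values below are hypothetical, and the assumption is that the base class copies these collections into the PulsarClusterSpec it builds.

    import org.testng.annotations.BeforeClass;

    public class BookieEnvIT extends PulsarClusterTestBase {   // hypothetical test class

        @BeforeClass(alwaysRun = true)
        public void before() throws Exception {
            // Populate the new collections first; the final setup() in the base class
            // is assumed to turn them into the cluster spec.
            bookkeeperEnvs.put("gcWaitTime", "1000");
            bookieAdditionalPorts.add(4181);
            setup();
        }
    }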
    diff --git a/tests/integration/src/test/resources/pulsar-auth.xml b/tests/integration/src/test/resources/pulsar-auth.xml
    index 81d2c1361a269..3647730dee201 100644
    --- a/tests/integration/src/test/resources/pulsar-auth.xml
    +++ b/tests/integration/src/test/resources/pulsar-auth.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    +  
    +    
             
    -    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml b/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml
    index b50af74ed17f0..b0f55f3e04b16 100644
    --- a/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml
    +++ b/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-cli.xml b/tests/integration/src/test/resources/pulsar-cli.xml
    index af55aca8a0098..1243dc7e485d7 100644
    --- a/tests/integration/src/test/resources/pulsar-cli.xml
    +++ b/tests/integration/src/test/resources/pulsar-cli.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-function.xml b/tests/integration/src/test/resources/pulsar-function.xml
    index a18a65ff2e16d..03a8d83997a3c 100644
    --- a/tests/integration/src/test/resources/pulsar-function.xml
    +++ b/tests/integration/src/test/resources/pulsar-function.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-io-ora-source.xml b/tests/integration/src/test/resources/pulsar-io-ora-source.xml
    index 1c5bb5faf677c..0a9927015be86 100644
    --- a/tests/integration/src/test/resources/pulsar-io-ora-source.xml
    +++ b/tests/integration/src/test/resources/pulsar-io-ora-source.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-io-sinks.xml b/tests/integration/src/test/resources/pulsar-io-sinks.xml
    index 614c371f3e52c..046f977f3e13b 100644
    --- a/tests/integration/src/test/resources/pulsar-io-sinks.xml
    +++ b/tests/integration/src/test/resources/pulsar-io-sinks.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-io-sources.xml b/tests/integration/src/test/resources/pulsar-io-sources.xml
    index 636b3e479195f..c995a7df249dd 100644
    --- a/tests/integration/src/test/resources/pulsar-io-sources.xml
    +++ b/tests/integration/src/test/resources/pulsar-io-sources.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +      
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-loadbalance.xml b/tests/integration/src/test/resources/pulsar-loadbalance.xml
    new file mode 100644
    index 0000000000000..d28dd1e0da7ee
    --- /dev/null
    +++ b/tests/integration/src/test/resources/pulsar-loadbalance.xml
    @@ -0,0 +1,29 @@
    +
    +
    +
    +
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-messaging.xml b/tests/integration/src/test/resources/pulsar-messaging.xml
    index cfbdb22587034..f8813b614f5cb 100644
    --- a/tests/integration/src/test/resources/pulsar-messaging.xml
    +++ b/tests/integration/src/test/resources/pulsar-messaging.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -            
    -            
    -            
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-plugin.xml b/tests/integration/src/test/resources/pulsar-plugin.xml
    index f88b67256e547..06fa8ee628c0f 100644
    --- a/tests/integration/src/test/resources/pulsar-plugin.xml
    +++ b/tests/integration/src/test/resources/pulsar-plugin.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-process.xml b/tests/integration/src/test/resources/pulsar-process.xml
    index 8e5258d30624c..a6f1ac468d954 100644
    --- a/tests/integration/src/test/resources/pulsar-process.xml
    +++ b/tests/integration/src/test/resources/pulsar-process.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-proxy.xml b/tests/integration/src/test/resources/pulsar-proxy.xml
    index ae6f13810535c..fabfb54d781a3 100644
    --- a/tests/integration/src/test/resources/pulsar-proxy.xml
    +++ b/tests/integration/src/test/resources/pulsar-proxy.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-python.xml b/tests/integration/src/test/resources/pulsar-python.xml
    index a5faa6389e0f1..365768083d21b 100644
    --- a/tests/integration/src/test/resources/pulsar-python.xml
    +++ b/tests/integration/src/test/resources/pulsar-python.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-schema.xml b/tests/integration/src/test/resources/pulsar-schema.xml
    index e07fdf2b2d86f..595bb8fda5521 100644
    --- a/tests/integration/src/test/resources/pulsar-schema.xml
    +++ b/tests/integration/src/test/resources/pulsar-schema.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-semantics.xml b/tests/integration/src/test/resources/pulsar-semantics.xml
    index 5b5402af4623b..69e171931f0e4 100644
    --- a/tests/integration/src/test/resources/pulsar-semantics.xml
    +++ b/tests/integration/src/test/resources/pulsar-semantics.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-sql.xml b/tests/integration/src/test/resources/pulsar-sql.xml
    index 1ab4d479ad041..a7e0995f1f2d0 100644
    --- a/tests/integration/src/test/resources/pulsar-sql.xml
    +++ b/tests/integration/src/test/resources/pulsar-sql.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-standalone.xml b/tests/integration/src/test/resources/pulsar-standalone.xml
    index d8892c0746181..45d79a72821b5 100644
    --- a/tests/integration/src/test/resources/pulsar-standalone.xml
    +++ b/tests/integration/src/test/resources/pulsar-standalone.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-thread.xml b/tests/integration/src/test/resources/pulsar-thread.xml
    index cf3da15e8e1ad..dc1b8e3b3de15 100644
    --- a/tests/integration/src/test/resources/pulsar-thread.xml
    +++ b/tests/integration/src/test/resources/pulsar-thread.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -            
    -
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +      
    +      
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar-tls.xml b/tests/integration/src/test/resources/pulsar-tls.xml
    index 153d14b62a725..88e90c43107fb 100644
    --- a/tests/integration/src/test/resources/pulsar-tls.xml
    +++ b/tests/integration/src/test/resources/pulsar-tls.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-transaction.xml b/tests/integration/src/test/resources/pulsar-transaction.xml
    index 72c375d000d8e..425f58ebda639 100644
    --- a/tests/integration/src/test/resources/pulsar-transaction.xml
    +++ b/tests/integration/src/test/resources/pulsar-transaction.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-upgrade.xml b/tests/integration/src/test/resources/pulsar-upgrade.xml
    index a52db54753372..cb312b04ed7a3 100644
    --- a/tests/integration/src/test/resources/pulsar-upgrade.xml
    +++ b/tests/integration/src/test/resources/pulsar-upgrade.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    +  
    +    
    +      
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/pulsar-websocket.xml b/tests/integration/src/test/resources/pulsar-websocket.xml
    index 87bf832d4e40a..e5023451518e0 100644
    --- a/tests/integration/src/test/resources/pulsar-websocket.xml
    +++ b/tests/integration/src/test/resources/pulsar-websocket.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/pulsar.xml b/tests/integration/src/test/resources/pulsar.xml
    index 7fbe9fa24d8d9..ada31c2e5da8c 100644
    --- a/tests/integration/src/test/resources/pulsar.xml
    +++ b/tests/integration/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -        
    -    
    +  
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +    
    +  
     
    diff --git a/tests/integration/src/test/resources/tiered-filesystem-storage.xml b/tests/integration/src/test/resources/tiered-filesystem-storage.xml
    index b14077594e0c9..c8cb694ea2d02 100644
    --- a/tests/integration/src/test/resources/tiered-filesystem-storage.xml
    +++ b/tests/integration/src/test/resources/tiered-filesystem-storage.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/integration/src/test/resources/tiered-jcloud-storage.xml b/tests/integration/src/test/resources/tiered-jcloud-storage.xml
    index f61f9f5fa1612..be699ba0a8680 100644
    --- a/tests/integration/src/test/resources/tiered-jcloud-storage.xml
    +++ b/tests/integration/src/test/resources/tiered-jcloud-storage.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -        
    -    
    -
    \ No newline at end of file
    +  
    +    
    +      
    +    
    +  
    +
    diff --git a/tests/pom.xml b/tests/pom.xml
    index 8a4aec9504ff3..2db89a5a8465f 100644
    --- a/tests/pom.xml
    +++ b/tests/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    +
   <packaging>pom</packaging>
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>org.apache.pulsar</groupId>
+    <groupId>io.streamnative</groupId>
     <artifactId>pulsar</artifactId>
     <version>3.0.0-SNAPSHOT</version>
   </parent>
-  <groupId>org.apache.pulsar.tests</groupId>
+  <groupId>io.streamnative.tests</groupId>
   <artifactId>tests-parent</artifactId>
   <name>Apache Pulsar :: Tests</name>
       
    @@ -50,7 +49,7 @@
               skipIntegrationTests
             
           
    -      
    +      
         
         
           integrationTests
    diff --git a/tests/pulsar-client-admin-shade-test/pom.xml b/tests/pulsar-client-admin-shade-test/pom.xml
    index 0e7bed47db060..f6f92e3402e43 100644
    --- a/tests/pulsar-client-admin-shade-test/pom.xml
    +++ b/tests/pulsar-client-admin-shade-test/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-client-admin-shade-test
    -    jar
    -    Apache Pulsar :: Tests :: Pulsar-Client-Admin-Shade Test
    -
    -    
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-admin
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-admin-api
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-messagecrypto-bc
    -            ${project.version}
    -            test
    -        
    -
    -    
    -
    -    
    +
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>io.streamnative.tests</groupId>
+    <artifactId>tests-parent</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>pulsar-client-admin-shade-test</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Pulsar :: Tests :: Pulsar-Client-Admin-Shade Test</name>
    +  
    +    
    +      io.streamnative
    +      pulsar-client-admin
    +      ${project.version}
    +      test
    +    
    +    
    +      io.streamnative
    +      pulsar-client-admin-api
    +      ${project.version}
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +    
    +      io.streamnative
    +      pulsar-client-messagecrypto-bc
    +      ${project.version}
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      ShadeTests
    +      
    +        
    +          ShadeTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            ShadeTests
    -            
    -                
    -                    ShadeTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/pulsar.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    +              false
    +              
    +                src/test/resources/pulsar.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml
    index d95dd95ae1775..2476755a95f7e 100644
    --- a/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml
    +++ b/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +    
    +  
     
    diff --git a/tests/pulsar-client-all-shade-test/pom.xml b/tests/pulsar-client-all-shade-test/pom.xml
    index 2b65f49a360ae..6c0b5ae9b6b04 100644
    --- a/tests/pulsar-client-all-shade-test/pom.xml
    +++ b/tests/pulsar-client-all-shade-test/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-client-all-shade-test
    -    jar
    -    Apache Pulsar :: Tests :: Pulsar-Client-All-Shade Test
    -
    -    
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-all
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -        
    -            org.apache.pulsar
    -            bouncy-castle-bc
    -            ${project.version}
    -            pkg
    -        
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-messagecrypto-bc
    -            ${project.version}
    -        
    -
    -    
    -
    -    
    +
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>io.streamnative.tests</groupId>
+    <artifactId>tests-parent</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>pulsar-client-all-shade-test</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Pulsar :: Tests :: Pulsar-Client-All-Shade Test</name>
    +  
    +    
    +      io.streamnative
    +      pulsar-client-all
    +      ${project.version}
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +    
    +      io.streamnative
    +      bouncy-castle-bc
    +      ${project.version}
    +      pkg
    +    
    +    
    +      io.streamnative
    +      pulsar-client-messagecrypto-bc
    +      ${project.version}
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      ShadeTests
    +      
    +        
    +          ShadeTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            ShadeTests
    -            
    -                
    -                    ShadeTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/pulsar.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    +              false
    +              
    +                src/test/resources/pulsar.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml
    index 5c725f80eaea6..cfbc5ce59488b 100644
    --- a/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml
    +++ b/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +    
    +  
     
    diff --git a/tests/pulsar-client-shade-test/pom.xml b/tests/pulsar-client-shade-test/pom.xml
    index 6efd88f4ac762..4ae9800292445 100644
    --- a/tests/pulsar-client-shade-test/pom.xml
    +++ b/tests/pulsar-client-shade-test/pom.xml
    @@ -1,4 +1,4 @@
    -
    +
     
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar.tests
    -        tests-parent
    -        3.0.0-SNAPSHOT
    -    
    -
    -    pulsar-client-shade-test
    -    jar
    -    Apache Pulsar :: Tests :: Pulsar-Client-Shade Test
    -
    -    
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.apache.pulsar
    -            pulsar-client-admin
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.testcontainers
    -            testcontainers
    -            test
    -        
    -
    -    
    -
    -    
    +
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>io.streamnative.tests</groupId>
+    <artifactId>tests-parent</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>pulsar-client-shade-test</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Pulsar :: Tests :: Pulsar-Client-Shade Test</name>
    +  
    +    
    +      io.streamnative
    +      pulsar-client
    +      ${project.version}
    +      test
    +    
    +    
    +      io.streamnative
    +      pulsar-client-admin
    +      ${project.version}
    +      test
    +    
    +    
    +      org.testcontainers
    +      testcontainers
    +      test
    +    
    +  
    +  
    +    
    +      
    +        org.apache.maven.plugins
    +        maven-jar-plugin
    +        
    +          
    +            
    +              test-jar
    +            
    +          
    +        
    +      
    +      
    +        org.apache.maven.plugins
    +        maven-surefire-plugin
    +        
    +          
    +          true
    +          
    +            ${project.version}
    +            ${project.build.directory}
    +          
    +        
    +      
    +    
    +  
    +  
    +    
    +      ShadeTests
    +      
    +        
    +          ShadeTests
    +        
    +      
    +      
             
    -            
    -                org.apache.maven.plugins
    -                maven-jar-plugin
    -                
    -                    
    -                        
    -                            test-jar
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-surefire-plugin
    -                
    -                    
    -                    true
    -                    
    -                        ${project.version}
    -                        ${project.build.directory}
    -                    
    -                
    -            
    -        
    -    
    -
    -    
    -        
    -            ShadeTests
    -            
    -                
    -                    ShadeTests
    -                
    -            
    -            
    -                
    -                    
    -                        org.apache.maven.plugins
    -                        maven-surefire-plugin
    -                        
    -                            ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
    +          
    +            org.apache.maven.plugins
    +            maven-surefire-plugin
    +            
    +              ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G
                                     -Dio.netty.leakDetectionLevel=advanced
                                     ${test.additional.args}
                                 
    -                            false
    -                            
    -                                src/test/resources/pulsar.xml
    -                            
    -                            1
    -                        
    -                    
    -                
    -            
    -        
    -    
    +              false
    +              
    +                src/test/resources/pulsar.xml
    +              
    +              1
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml
    index 6132ad05f6ccf..e2ac6a9ee4f30 100644
    --- a/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml
    +++ b/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
     
    -    
    -        
    -            
    -            
    -        
    -    
    +  
    +    
    +      
    +      
    +    
    +  
     
    diff --git a/tiered-storage/file-system/pom.xml b/tiered-storage/file-system/pom.xml
    index 745da1a95c70d..9ce9353d9d4fc 100644
    --- a/tiered-storage/file-system/pom.xml
    +++ b/tiered-storage/file-system/pom.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    -    4.0.0
    -
    -    
    -        org.apache.pulsar
    -        tiered-storage-parent
    -        3.0.0-SNAPSHOT
    -        ..
    -    
    -
    -    tiered-storage-file-system
    -    Apache Pulsar :: Tiered Storage :: File System
    -    
    -        
    -            ${project.groupId}
    -            managed-ledger
    -            ${project.version}
    -        
    -        
    -            org.apache.hadoop
    -            hadoop-common
    -            ${hdfs-offload-version3}
    -            
    -                
    -                    log4j
    -                    log4j
    -                
    -                
    -                    org.slf4j
    -                    slf4j-log4j12
    -                
    -            
    -        
    -        
    -        
    -            org.apache.avro
    -            avro
    -            ${avro.version}
    -        
    -        
    -        
    -            net.minidev
    -            json-smart
    -            ${json-smart.version}
    -        
    -        
    -            com.google.protobuf
    -            protobuf-java
    -        
    -
    -        
    -            ${project.groupId}
    -            testmocks
    -            ${project.version}
    -            test
    -        
    -
    -        
    -            org.apache.hadoop
    -            hadoop-minicluster
    -            ${hdfs-offload-version3}
    -            test
    -            
    -              
    -                io.netty
    -                netty-all
    -              
    -            
    -        
    -
    -        
    +
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>io.streamnative</groupId>
+    <artifactId>tiered-storage-parent</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+  <artifactId>tiered-storage-file-system</artifactId>
+  <name>Apache Pulsar :: Tiered Storage :: File System</name>
    +  
    +    
    +      ${project.groupId}
    +      managed-ledger
    +      ${project.version}
    +    
    +    
    +      org.apache.hadoop
    +      hadoop-common
    +      ${hdfs-offload-version3}
    +      
    +        
    +          log4j
    +          log4j
    +        
    +        
    +          org.slf4j
    +          slf4j-log4j12
    +        
    +      
    +    
    +    
    +      org.apache.hadoop
    +      hadoop-hdfs-client
    +      ${hdfs-offload-version3}
    +      
    +        
    +          org.apache.avro
    +          avro
    +        
    +        
    +          org.mortbay.jetty
    +          jetty
    +        
    +        
    +          com.sun.jersey
    +          jersey-core
    +        
    +        
    +          com.sun.jersey
    +          jersey-server
    +        
    +        
    +          javax.servlet
    +          servlet-api
    +        
    +      
    +    
    +    
    +    
    +      org.apache.avro
    +      avro
    +      ${avro.version}
    +    
    +    
    +    
    +      net.minidev
    +      json-smart
    +      ${json-smart.version}
    +    
    +    
    +      com.google.protobuf
    +      protobuf-java
    +    
    +    
    +      ${project.groupId}
    +      testmocks
    +      ${project.version}
    +      test
    +    
    +    
    +      org.apache.hadoop
    +      hadoop-minicluster
    +      ${hdfs-offload-version3}
    +      test
    +      
    +        
               io.netty
    -          netty-codec-http
    -        
    -
    -        
    -            org.eclipse.jetty
    -            jetty-server
    -            test
    -        
    -        
    -            org.eclipse.jetty
    -            jetty-alpn-conscrypt-server
    -            test
    -        
    -        
    -            org.eclipse.jetty
    -            jetty-servlet
    -            test
    -        
    -        
    -            org.eclipse.jetty
    -            jetty-util
    -            test
    -        
    -    
    -
    -    
    -        
    -            
    -                org.apache.nifi
    -                nifi-nar-maven-plugin
    -            
    -            
    -                com.github.spotbugs
    -                spotbugs-maven-plugin
    -                ${spotbugs-maven-plugin.version}
    -                
    -                    ${basedir}/src/main/resources/findbugsExclude.xml
    -                
    -                
    -                    
    -                        spotbugs
    -                        verify
    -                        
    -                            check
    -                        
    -                    
    -                
    -            
    -            
    -                org.apache.maven.plugins
    -                maven-checkstyle-plugin
    -                
    -                    
    -                        checkstyle
    -                        verify
    -                        
    -                            check
    -                        
    -                    
    -                
    -            
    -        
    -    
    -    
    -        
    -        
    -            owasp-dependency-check
    -            
    -                
    -                    
    -                        org.owasp
    -                        dependency-check-maven
    -                        ${dependency-check-maven.version}
    -                        
    -                            
    -                                
    -                                    aggregate
    -                                
    -                                none
    -                            
    -                        
    -                    
    -                
    -            
    -        
    -    
    +    
    +      owasp-dependency-check
    +      
    +        
    +          
    +            org.owasp
    +            dependency-check-maven
    +            ${dependency-check-maven.version}
    +            
    +              
    +                
    +                  aggregate
    +                
    +                none
    +              
    +            
    +          
    +        
    +      
    +    
    +  
     
    diff --git a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java
    index bdeb88e9ac93c..e31c62b2bd3c8 100644
    --- a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java
    +++ b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java
    @@ -28,6 +28,7 @@
     import java.util.concurrent.ExecutorService;
     import java.util.concurrent.ScheduledExecutorService;
     import java.util.concurrent.TimeUnit;
    +import java.util.concurrent.atomic.AtomicReference;
     import org.apache.bookkeeper.client.BKException;
     import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
     import org.apache.bookkeeper.client.api.LedgerEntries;
    @@ -37,9 +38,11 @@
     import org.apache.bookkeeper.client.impl.LedgerEntriesImpl;
     import org.apache.bookkeeper.client.impl.LedgerEntryImpl;
     import org.apache.bookkeeper.mledger.LedgerOffloaderStats;
    +import org.apache.bookkeeper.mledger.ManagedLedgerException;
     import org.apache.hadoop.io.BytesWritable;
     import org.apache.hadoop.io.LongWritable;
     import org.apache.hadoop.io.MapFile;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
     
    @@ -51,6 +54,13 @@ public class FileStoreBackedReadHandleImpl implements ReadHandle {
         private final LedgerMetadata ledgerMetadata;
         private final LedgerOffloaderStats offloaderStats;
         private final String managedLedgerName;
    +    private final String topicName;
    +    enum State {
    +        Opened,
    +        Closed
    +    }
    +    private volatile State state;
+    private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();
     
         private FileStoreBackedReadHandleImpl(ExecutorService executor, MapFile.Reader reader, long ledgerId,
                                               LedgerOffloaderStats offloaderStats,
    @@ -60,15 +70,17 @@ private FileStoreBackedReadHandleImpl(ExecutorService executor, MapFile.Reader r
             this.reader = reader;
             this.offloaderStats = offloaderStats;
             this.managedLedgerName = managedLedgerName;
    +        this.topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
             LongWritable key = new LongWritable();
             BytesWritable value = new BytesWritable();
             try {
                 key.set(FileSystemManagedLedgerOffloader.METADATA_KEY_INDEX);
                 long startReadIndexTime = System.nanoTime();
                 reader.get(key, value);
    -            offloaderStats.recordReadOffloadIndexLatency(managedLedgerName,
    +            offloaderStats.recordReadOffloadIndexLatency(topicName,
                         System.nanoTime() - startReadIndexTime, TimeUnit.NANOSECONDS);
                 this.ledgerMetadata = parseLedgerMetadata(ledgerId, value.copyBytes());
    +            state = State.Opened;
             } catch (IOException e) {
                 log.error("Fail to read LedgerMetadata for ledgerId {}",
                         ledgerId);
    @@ -89,15 +101,20 @@ public LedgerMetadata getLedgerMetadata() {
     
         @Override
     public CompletableFuture<Void> closeAsync() {
-        CompletableFuture<Void> promise = new CompletableFuture<>();
    +        if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) {
    +            return closeFuture.get();
    +        }
    +
+        CompletableFuture<Void> promise = closeFuture.get();
             executor.execute(() -> {
    -                try {
    -                    reader.close();
    -                    promise.complete(null);
    -                } catch (IOException t) {
    -                    promise.completeExceptionally(t);
    -                }
    -            });
    +            try {
    +                reader.close();
    +                state = State.Closed;
    +                promise.complete(null);
    +            } catch (IOException t) {
    +                promise.completeExceptionally(t);
    +            }
    +        });
             return promise;
         }
     
@@ -108,6 +125,12 @@ public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntr
             }
        CompletableFuture<LedgerEntries> promise = new CompletableFuture<>();
             executor.execute(() -> {
    +            if (state == State.Closed) {
    +                log.warn("Reading a closed read handler. Ledger ID: {}, Read range: {}-{}",
    +                        ledgerId, firstEntry, lastEntry);
    +                promise.completeExceptionally(new ManagedLedgerException.OffloadReadHandleClosedException());
    +                return;
    +            }
                 if (firstEntry > lastEntry
                         || firstEntry < 0
                         || lastEntry > getLastAddConfirmed()) {
@@ -125,7 +148,7 @@ public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntr
                     while (entriesToRead > 0) {
                         long startReadTime = System.nanoTime();
                         reader.next(key, value);
    -                    this.offloaderStats.recordReadOffloadDataLatency(managedLedgerName,
    +                    this.offloaderStats.recordReadOffloadDataLatency(topicName,
                                 System.nanoTime() - startReadTime, TimeUnit.NANOSECONDS);
                         int length = value.getLength();
                         long entryId = key.get();
@@ -135,7 +158,7 @@ public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntr
                             buf.writeBytes(value.copyBytes());
                             entriesToRead--;
                             nextExpectedId++;
    -                        this.offloaderStats.recordReadOffloadBytes(managedLedgerName, length);
    +                        this.offloaderStats.recordReadOffloadBytes(topicName, length);
                         } else if (entryId > lastEntry) {
                             log.info("Expected to read {}, but read {}, which is greater than last entry {}",
                                     nextExpectedId, entryId, lastEntry);
@@ -144,7 +167,7 @@ public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntr
                 }
                     promise.complete(LedgerEntriesImpl.create(entries));
                 } catch (Throwable t) {
    -                this.offloaderStats.recordReadOffloadError(managedLedgerName);
    +                this.offloaderStats.recordReadOffloadError(topicName);
                     promise.completeExceptionally(t);
                     entries.forEach(LedgerEntry::close);
                 }
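The closeAsync() change above makes closing idempotent: only the first caller installs the close future, and later callers observe the same one. A stripped-down sketch of that pattern, independent of the BookKeeper types:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicReference;

    class IdempotentCloser {
        private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();

        CompletableFuture<Void> closeAsync() {
            // Only the caller that wins the compareAndSet performs the actual close;
            // everyone else gets the same future, so the resource is released at most once.
            if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) {
                return closeFuture.get();
            }
            CompletableFuture<Void> promise = closeFuture.get();
            try {
                // release the real resource here, e.g. reader.close()
                promise.complete(null);
            } catch (RuntimeException e) {
                promise.completeExceptionally(e);
            }
            return promise;
        }
    }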
    diff --git a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileSystemManagedLedgerOffloader.java b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileSystemManagedLedgerOffloader.java
    index d5e09ba725421..25b63374946c8 100644
    --- a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileSystemManagedLedgerOffloader.java
    +++ b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileSystemManagedLedgerOffloader.java
    @@ -45,6 +45,7 @@
     import org.apache.hadoop.io.IOUtils;
     import org.apache.hadoop.io.LongWritable;
     import org.apache.hadoop.io.MapFile;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
    @@ -197,9 +198,10 @@ public void run() {
                     return;
                 }
                 long ledgerId = readHandle.getId();
    -            final String topicName = extraMetadata.get(MANAGED_LEDGER_NAME);
    -            String storagePath = getStoragePath(storageBasePath, topicName);
    +            final String managedLedgerName = extraMetadata.get(MANAGED_LEDGER_NAME);
    +            String storagePath = getStoragePath(storageBasePath, managedLedgerName);
                 String dataFilePath = getDataFilePath(storagePath, ledgerId, uuid);
    +            final String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
                 LongWritable key = new LongWritable();
                 BytesWritable value = new BytesWritable();
                 try {
    @@ -241,7 +243,7 @@ public void run() {
                     promise.complete(null);
                 } catch (Exception e) {
                     log.error("Exception when get CompletableFuture : ManagerLedgerName: {}, "
    -                        + "LedgerId: {}, UUID: {} ", topicName, ledgerId, uuid, e);
    +                        + "LedgerId: {}, UUID: {} ", managedLedgerName, ledgerId, uuid, e);
                     if (e instanceof InterruptedException) {
                         Thread.currentThread().interrupt();
                     }
    @@ -306,22 +308,27 @@ public static FileSystemWriter create(LedgerEntries ledgerEntriesOnce,
             @Override
             public void run() {
                 String managedLedgerName = ledgerReader.extraMetadata.get(MANAGED_LEDGER_NAME);
    +            String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
                 if (ledgerReader.fileSystemWriteException == null) {
                    Iterator<LedgerEntry> iterator = ledgerEntriesOnce.iterator();
                     while (iterator.hasNext()) {
                         LedgerEntry entry = iterator.next();
                         long entryId = entry.getEntryId();
                         key.set(entryId);
    +                    byte[] currentEntryBytes;
    +                    int currentEntrySize;
                         try {
    -                        value.set(entry.getEntryBytes(), 0, entry.getEntryBytes().length);
    +                        currentEntryBytes = entry.getEntryBytes();
    +                        currentEntrySize = currentEntryBytes.length;
    +                        value.set(currentEntryBytes, 0, currentEntrySize);
                             dataWriter.append(key, value);
                         } catch (IOException e) {
                             ledgerReader.fileSystemWriteException = e;
    -                        ledgerReader.offloaderStats.recordWriteToStorageError(managedLedgerName);
    +                        ledgerReader.offloaderStats.recordWriteToStorageError(topicName);
                             break;
                         }
                         haveOffloadEntryNumber.incrementAndGet();
    -                    ledgerReader.offloaderStats.recordOffloadBytes(managedLedgerName, entry.getLength());
    +                    ledgerReader.offloaderStats.recordOffloadBytes(topicName, currentEntrySize);
                     }
                 }
                 countDownLatch.countDown();
@@ -367,6 +374,7 @@ public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid, Map<String, String>
+        final String topicName = TopicName.fromPersistenceNamingEncoding(ledgerName);
         CompletableFuture<Void> promise = new CompletableFuture<>();
             try {
                 fileSystem.delete(new Path(dataFilePath), true);
@@ -376,7 +384,7 @@ public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid, Map<String, String>
    -                this.offloaderStats.recordDeleteOffloadOps(ledgerName, t == null));
    +                this.offloaderStats.recordDeleteOffloadOps(topicName, t == null));
         }
     
         @Override
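The offloader stats calls above now key on the topic name instead of the raw managed-ledger name. A small sketch of the conversion, assuming the usual persistence-encoding layout; the example names and the expected output are illustrative, not taken from this patch.

    import org.apache.pulsar.common.naming.TopicName;

    // Managed ledgers are named "<tenant>/<namespace>/persistent/<local-topic>";
    // fromPersistenceNamingEncoding turns that back into a topic name usable as a metric label.
    String managedLedgerName = "public/default/persistent/offload-demo";
    String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
    System.out.println(topicName);   // expected (assumption): persistent://public/default/offload-demo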
    diff --git a/tiered-storage/file-system/src/main/resources/findbugsExclude.xml b/tiered-storage/file-system/src/main/resources/findbugsExclude.xml
    index 051f0e68e257a..1b8cc1ac799ca 100644
    --- a/tiered-storage/file-system/src/main/resources/findbugsExclude.xml
    +++ b/tiered-storage/file-system/src/main/resources/findbugsExclude.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
    diff --git a/tiered-storage/jcloud/pom.xml b/tiered-storage/jcloud/pom.xml
    --- a/tiered-storage/jcloud/pom.xml
    +++ b/tiered-storage/jcloud/pom.xml
       <modelVersion>4.0.0</modelVersion>
    -
       <parent>
    -    <groupId>org.apache.pulsar</groupId>
    +    <groupId>io.streamnative</groupId>
         <artifactId>tiered-storage-parent</artifactId>
         <version>3.0.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
       </parent>
    -
       <artifactId>tiered-storage-jcloud</artifactId>
       <name>Apache Pulsar :: Tiered Storage :: JCloud</name>
    -
       <dependencies>
         <dependency>
           <groupId>${project.groupId}</groupId>
           <artifactId>managed-ledger</artifactId>
           <version>${project.version}</version>
         </dependency>
    -
         <dependency>
           <groupId>${project.groupId}</groupId>
           <artifactId>jclouds-shaded</artifactId>
    @@ -74,7 +70,6 @@
             </exclusion>
           </exclusions>
         </dependency>
    -
         <dependency>
           <groupId>org.apache.jclouds</groupId>
           <artifactId>jclouds-allblobstore</artifactId>
    @@ -85,12 +80,10 @@
           <groupId>com.amazonaws</groupId>
           <artifactId>aws-java-sdk-core</artifactId>
         </dependency>
    -
         <dependency>
           <groupId>com.amazonaws</groupId>
           <artifactId>aws-java-sdk-sts</artifactId>
         </dependency>
    -
         <dependency>
           <groupId>${project.groupId}</groupId>
           <artifactId>testmocks</artifactId>
    @@ -103,7 +96,6 @@
           <version>${jclouds.version}</version>
           <scope>provided</scope>
         </dependency>
    -
         <dependency>
           <groupId>javax.xml.bind</groupId>
           <artifactId>jaxb-api</artifactId>
    @@ -115,13 +107,11 @@
          
           <scope>runtime</scope>
         </dependency>
    -
         <dependency>
           <groupId>com.sun.activation</groupId>
           <artifactId>javax.activation</artifactId>
           <scope>runtime</scope>
         </dependency>
    -
       </dependencies>
       <build>
         <plugins>
    @@ -129,7 +119,6 @@
             <groupId>org.apache.nifi</groupId>
             <artifactId>nifi-nar-maven-plugin</artifactId>
           </plugin>
    -
           <plugin>
             <groupId>com.github.spotbugs</groupId>
             <artifactId>spotbugs-maven-plugin</artifactId>
    diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java
    index aa27df46c5e65..0dea46726f50a 100644
    --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java
    +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java
    @@ -26,6 +26,7 @@
     import org.apache.bookkeeper.mledger.offload.jcloud.BackedInputStream;
     import org.apache.bookkeeper.mledger.offload.jcloud.impl.DataBlockUtils.VersionCheck;
     import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.jclouds.blobstore.BlobStore;
     import org.jclouds.blobstore.domain.Blob;
     import org.jclouds.blobstore.options.GetOptions;
    @@ -44,6 +45,7 @@ public class BlobStoreBackedInputStreamImpl extends BackedInputStream {
         private final int bufferSize;
         private LedgerOffloaderStats offloaderStats;
         private String managedLedgerName;
    +    private String topicName;
     
         private long cursor;
         private long bufferOffsetStart;
    @@ -71,6 +73,7 @@ public BlobStoreBackedInputStreamImpl(BlobStore blobStore, String bucket, String
             this(blobStore, bucket, key, versionCheck, objectLen, bufferSize);
             this.offloaderStats = offloaderStats;
             this.managedLedgerName = managedLedgerName;
    +        this.topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
         }
     
         /**
    @@ -110,13 +113,13 @@ private boolean refillBufferIfNeeded() throws IOException {
                     // because JClouds streams the content
                     // and actually the HTTP call finishes when the stream is fully read
                     if (this.offloaderStats != null) {
    -                    this.offloaderStats.recordReadOffloadDataLatency(managedLedgerName,
    +                    this.offloaderStats.recordReadOffloadDataLatency(topicName,
                                 System.nanoTime() - startReadTime, TimeUnit.NANOSECONDS);
    -                    this.offloaderStats.recordReadOffloadBytes(managedLedgerName, endRange - startRange + 1);
    +                    this.offloaderStats.recordReadOffloadBytes(topicName, endRange - startRange + 1);
                     }
                 } catch (Throwable e) {
                     if (null != this.offloaderStats) {
    -                    this.offloaderStats.recordReadOffloadError(this.managedLedgerName);
    +                    this.offloaderStats.recordReadOffloadError(this.topicName);
                     }
                     throw new IOException("Error reading from BlobStore", e);
                 }
    diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java
    index cdabe5ece0ba2..5346be6a044c8 100644
    --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java
    +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java
    @@ -18,6 +18,7 @@
      */
     package org.apache.bookkeeper.mledger.offload.jcloud.impl;
     
    +import com.google.common.annotations.VisibleForTesting;
     import com.google.common.cache.Cache;
     import com.google.common.cache.CacheBuilder;
     import io.netty.buffer.ByteBuf;
    @@ -30,6 +31,7 @@
     import java.util.concurrent.ExecutorService;
     import java.util.concurrent.ScheduledExecutorService;
     import java.util.concurrent.TimeUnit;
    +import java.util.concurrent.atomic.AtomicReference;
     import org.apache.bookkeeper.client.BKException;
     import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
     import org.apache.bookkeeper.client.api.LedgerEntries;
    @@ -45,6 +47,7 @@
     import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlockBuilder;
     import org.apache.bookkeeper.mledger.offload.jcloud.impl.DataBlockUtils.VersionCheck;
     import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.jclouds.blobstore.BlobStore;
     import org.jclouds.blobstore.domain.Blob;
     import org.slf4j.Logger;
    @@ -65,13 +68,14 @@ public class BlobStoreBackedReadHandleImpl implements ReadHandle {
                 .newBuilder()
                 .expireAfterAccess(CACHE_TTL_SECONDS, TimeUnit.SECONDS)
                 .build();
    +    private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();
     
         enum State {
             Opened,
             Closed
         }
     
    -    private State state = null;
    +    private volatile State state = null;
     
         private BlobStoreBackedReadHandleImpl(long ledgerId, OffloadIndexBlock index,
                                               BackedInputStream inputStream, ExecutorService executor) {
    @@ -95,18 +99,22 @@ public LedgerMetadata getLedgerMetadata() {
     
         @Override
         public CompletableFuture<Void> closeAsync() {
    -        CompletableFuture<Void> promise = new CompletableFuture<>();
    +        if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) {
    +            return closeFuture.get();
    +        }
    +
    +        CompletableFuture<Void> promise = closeFuture.get();
             executor.execute(() -> {
    -                try {
    -                    index.close();
    -                    inputStream.close();
    -                    entryOffsets.invalidateAll();
    -                    state = State.Closed;
    -                    promise.complete(null);
    -                } catch (IOException t) {
    -                    promise.completeExceptionally(t);
    -                }
    -            });
    +            try {
    +                index.close();
    +                inputStream.close();
    +                entryOffsets.invalidateAll();
    +                state = State.Closed;
    +                promise.complete(null);
    +            } catch (IOException t) {
    +                promise.completeExceptionally(t);
    +            }
    +        });
             return promise;
         }
     
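
The rewritten `closeAsync` above makes close idempotent: the first caller installs the promise via compare-and-set and schedules the cleanup, and every later caller gets the same future back. A minimal sketch of that pattern in isolation, with `doClose()` as a placeholder for the index/stream cleanup:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

public class IdempotentCloseSketch {
    private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    public CompletableFuture<Void> closeAsync() {
        // Only the first caller wins the CAS; everyone else returns the already-installed future.
        if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) {
            return closeFuture.get();
        }
        CompletableFuture<Void> promise = closeFuture.get();
        executor.execute(() -> {
            try {
                doClose();                      // placeholder for the real cleanup
                promise.complete(null);
            } catch (Exception e) {
                promise.completeExceptionally(e);
            }
        });
        return promise;
    }

    private void doClose() {
        // hypothetical resource cleanup
    }
}
```
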
    @@ -261,6 +269,7 @@ public static ReadHandle open(ScheduledExecutorService executor,
             int retryCount = 3;
             OffloadIndexBlock index = null;
             IOException lastException = null;
    +        String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
             // The following retry is used to avoid to some network issue cause read index file failure.
             // If it can not recovery in the retry, we will throw the exception and the dispatcher will schedule to
             // next read.
    @@ -269,7 +278,7 @@ public static ReadHandle open(ScheduledExecutorService executor,
             while (retryCount-- > 0) {
                 long readIndexStartTime = System.nanoTime();
                 Blob blob = blobStore.getBlob(bucket, indexKey);
    -            offloaderStats.recordReadOffloadIndexLatency(managedLedgerName,
    +            offloaderStats.recordReadOffloadIndexLatency(topicName,
                         System.nanoTime() - readIndexStartTime, TimeUnit.NANOSECONDS);
                 versionCheck.check(indexKey, blob);
                 OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create();
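
The `open` path above decodes the topic name once and retries the index read a fixed number of times before surfacing the failure to the caller. A minimal sketch of that bounded-retry shape, with `readIndexOnce()` standing in for the blob fetch plus version check:

```java
import java.io.IOException;

public class BoundedRetrySketch {

    // Hypothetical stand-in for fetching and validating the offload index blob.
    interface IndexReader {
        byte[] readIndexOnce() throws IOException;
    }

    static byte[] readIndexWithRetry(IndexReader reader, int retryCount) throws IOException {
        IOException lastException = null;
        while (retryCount-- > 0) {
            try {
                return reader.readIndexOnce();
            } catch (IOException e) {
                lastException = e;              // remember the failure and try again
            }
        }
        // All attempts failed; the caller (e.g. the dispatcher) can schedule the next read.
        throw lastException != null ? lastException : new IOException("no read attempt was made");
    }
}
```
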
    @@ -296,6 +305,7 @@ public static ReadHandle open(ScheduledExecutorService executor,
         }
     
         // for testing
    +    @VisibleForTesting
         State getState() {
             return this.state;
         }
    diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java
    index 2e3d0b08970ca..53d96e08abf5e 100644
    --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java
    +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java
    @@ -30,6 +30,7 @@
     import java.util.concurrent.ExecutorService;
     import java.util.concurrent.ScheduledExecutorService;
     import java.util.concurrent.TimeUnit;
    +import java.util.concurrent.atomic.AtomicReference;
     import lombok.val;
     import org.apache.bookkeeper.client.BKException;
     import org.apache.bookkeeper.client.api.LastConfirmedAndEntry;
    @@ -46,6 +47,7 @@
     import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlockV2Builder;
     import org.apache.bookkeeper.mledger.offload.jcloud.impl.DataBlockUtils.VersionCheck;
     import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.jclouds.blobstore.BlobStore;
     import org.jclouds.blobstore.domain.Blob;
     import org.slf4j.Logger;
    @@ -59,7 +61,8 @@ public class BlobStoreBackedReadHandleImplV2 implements ReadHandle {
         private final List<BackedInputStream> inputStreams;
         private final List<DataInputStream> dataStreams;
         private final ExecutorService executor;
    -    private State state = null;
    +    private volatile State state = null;
    +    private final AtomicReference<CompletableFuture<Void>> closeFuture = new AtomicReference<>();
     
         enum State {
             Opened,
    @@ -122,7 +125,11 @@ public LedgerMetadata getLedgerMetadata() {
     
         @Override
         public CompletableFuture<Void> closeAsync() {
    -        CompletableFuture<Void> promise = new CompletableFuture<>();
    +        if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) {
    +            return closeFuture.get();
    +        }
    +
    +        CompletableFuture<Void> promise = closeFuture.get();
             executor.execute(() -> {
                 try {
                     for (OffloadIndexBlockV2 indexBlock : indices) {
    @@ -142,7 +149,9 @@ public CompletableFuture<Void> closeAsync() {
     
         @Override
         public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
    -        log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry);
    +        if (log.isDebugEnabled()) {
    +            log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry);
    +        }
             CompletableFuture<LedgerEntries> promise = new CompletableFuture<>();
             executor.execute(() -> {
                 if (state == State.Closed) {
    @@ -297,13 +306,14 @@ public static ReadHandle open(ScheduledExecutorService executor,
                 throws IOException {
             List<BackedInputStream> inputStreams = new LinkedList<>();
             List<OffloadIndexBlockV2> indice = new LinkedList<>();
    +        String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
             for (int i = 0; i < indexKeys.size(); i++) {
                 String indexKey = indexKeys.get(i);
                 String key = keys.get(i);
                 log.debug("open bucket: {} index key: {}", bucket, indexKey);
                 long startTime = System.nanoTime();
                 Blob blob = blobStore.getBlob(bucket, indexKey);
    -            offloaderStats.recordReadOffloadIndexLatency(managedLedgerName,
    +            offloaderStats.recordReadOffloadIndexLatency(topicName,
                         System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
                 log.debug("indexKey blob: {} {}", indexKey, blob);
                 versionCheck.check(indexKey, blob);
    diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java
    index 6d69b5edbc3fb..03898b032b403 100644
    --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java
    +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java
    @@ -63,6 +63,7 @@
     import org.apache.bookkeeper.mledger.offload.jcloud.provider.BlobStoreLocation;
     import org.apache.bookkeeper.mledger.offload.jcloud.provider.TieredStorageConfiguration;
     import org.apache.bookkeeper.mledger.proto.MLDataFormats;
    +import org.apache.pulsar.common.naming.TopicName;
     import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl;
     import org.jclouds.blobstore.BlobStore;
     import org.jclouds.blobstore.domain.Blob;
    @@ -153,11 +154,17 @@ public static BlobStoreManagedLedgerOffloader create(TieredStorageConfiguration
                     config.getProvider().getDriver(), config.getServiceEndpoint(),
                     config.getBucket(), config.getRegion());
     
    -        blobStores.putIfAbsent(config.getBlobStoreLocation(), config.getBlobStore());
             this.offloaderStats = offloaderStats;
             log.info("The ledger offloader was created.");
         }
     
    +    private BlobStore getBlobStore(BlobStoreLocation blobStoreLocation) {
    +        return blobStores.computeIfAbsent(blobStoreLocation, location -> {
    +            log.info("Creating blob store for location {}", location);
    +            return config.getBlobStore();
    +        });
    +    }
    +
         @Override
         public String getOffloadDriverName() {
             return config.getDriver();
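
The new `getBlobStore` helper above replaces the eager `putIfAbsent` in the constructor with a lazy `computeIfAbsent`, so a blob store client is only built the first time a location is actually used, and at most once per location. A generic sketch of that idea (the key and client types here are placeholders, not the jclouds classes):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class LazyClientCacheSketch<K, V> {
    private final Map<K, V> clients = new ConcurrentHashMap<>();
    private final Function<K, V> factory;

    public LazyClientCacheSketch(Function<K, V> factory) {
        this.factory = factory;
    }

    // Build the client on first use and reuse it afterwards; ConcurrentHashMap.computeIfAbsent
    // runs the factory at most once per key, even with concurrent callers.
    public V get(K location) {
        return clients.computeIfAbsent(location, factory);
    }
}
```

In the offloader, the key would be the configured `BlobStoreLocation` and the factory would delegate to `config.getBlobStore()`, as in the hunk above.
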
    @@ -176,12 +183,13 @@ public Map<String, String> getOffloadDriverMetadata() {
         public CompletableFuture<Void> offload(ReadHandle readHandle,
                                                UUID uuid,
                                                Map extraMetadata) {
    -        final String topicName = extraMetadata.get(MANAGED_LEDGER_NAME);
    -        final BlobStore writeBlobStore = blobStores.get(config.getBlobStoreLocation());
    -        log.info("offload {} uuid {} extraMetadata {} to {} {}", readHandle.getId(), uuid, extraMetadata,
    -                config.getBlobStoreLocation(), writeBlobStore);
    +        final String managedLedgerName = extraMetadata.get(MANAGED_LEDGER_NAME);
    +        final String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
             CompletableFuture<Void> promise = new CompletableFuture<>();
             scheduler.chooseThread(readHandle.getId()).execute(() -> {
    +            final BlobStore writeBlobStore = getBlobStore(config.getBlobStoreLocation());
    +            log.info("offload {} uuid {} extraMetadata {} to {} {}", readHandle.getId(), uuid, extraMetadata,
    +                config.getBlobStoreLocation(), writeBlobStore);
                 if (readHandle.getLength() == 0 || !readHandle.isClosed() || readHandle.getLastAddConfirmed() < 0) {
                     promise.completeExceptionally(
                             new IllegalArgumentException("An empty or open ledger should never be offloaded"));
    @@ -226,7 +234,7 @@ public CompletableFuture<Void> offload(ReadHandle readHandle,
                             .calculateBlockSize(config.getMaxBlockSizeInBytes(), readHandle, startEntry, entryBytesWritten);
     
                         try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl(
    -                            readHandle, startEntry, blockSize, this.offloaderStats, topicName)) {
    +                            readHandle, startEntry, blockSize, this.offloaderStats, managedLedgerName)) {
     
                             Payload partPayload = Payloads.newInputStreamPayload(blockStream);
                             partPayload.getContentMetadata().setContentLength((long) blockSize);
    @@ -328,7 +336,7 @@ public CompletableFuture<OffloadHandle> streamingOffload(@NonNull ManagedLedger
                     driverMetadata);
             log.debug("begin offload with {}:{}", beginLedger, beginEntry);
             this.offloadResult = new CompletableFuture<>();
    -        blobStore = blobStores.get(config.getBlobStoreLocation());
    +        blobStore = getBlobStore(config.getBlobStoreLocation());
             streamingIndexBuilder = OffloadIndexBlockV2Builder.create();
             streamingDataBlockKey = segmentInfo.uuid.toString();
             streamingDataIndexKey = String.format("%s-index", segmentInfo.uuid);
    @@ -534,13 +542,13 @@ public CompletableFuture<ReadHandle> readOffloaded(long ledgerId, UUID uid,
     
             BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
             String readBucket = bsKey.getBucket();
    -        BlobStore readBlobstore = blobStores.get(config.getBlobStoreLocation());
     
             CompletableFuture<ReadHandle> promise = new CompletableFuture<>();
             String key = DataBlockUtils.dataBlockOffloadKey(ledgerId, uid);
             String indexKey = DataBlockUtils.indexBlockOffloadKey(ledgerId, uid);
             scheduler.chooseThread(ledgerId).execute(() -> {
                 try {
    +                BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
                     promise.complete(BlobStoreBackedReadHandleImpl.open(scheduler.chooseThread(ledgerId),
                             readBlobstore,
                             readBucket, key, indexKey,
    @@ -560,7 +568,6 @@ public CompletableFuture<ReadHandle> readOffloaded(long ledgerId, MLDataFormats.
                                                            Map<String, String> offloadDriverMetadata) {
             BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
             String readBucket = bsKey.getBucket();
    -        BlobStore readBlobstore = blobStores.get(config.getBlobStoreLocation());
             CompletableFuture<ReadHandle> promise = new CompletableFuture<>();
             final List<MLDataFormats.OffloadSegment> offloadSegmentList = ledgerContext.getOffloadSegmentList();
             List<String> keys = Lists.newLinkedList();
    @@ -575,6 +582,7 @@ public CompletableFuture readOffloaded(long ledgerId, MLDataFormats.
     
             scheduler.chooseThread(ledgerId).execute(() -> {
                 try {
    +                BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
                     promise.complete(BlobStoreBackedReadHandleImplV2.open(scheduler.chooseThread(ledgerId),
                             readBlobstore,
                             readBucket, keys, indexKeys,
    @@ -594,11 +602,11 @@ public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
                                                        Map<String, String> offloadDriverMetadata) {
             BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
             String readBucket = bsKey.getBucket(offloadDriverMetadata);
    -        BlobStore readBlobstore = blobStores.get(config.getBlobStoreLocation());
     
             CompletableFuture<Void> promise = new CompletableFuture<>();
             scheduler.chooseThread(ledgerId).execute(() -> {
                 try {
    +                BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
                     readBlobstore.removeBlobs(readBucket,
                         ImmutableList.of(DataBlockUtils.dataBlockOffloadKey(ledgerId, uid),
                                          DataBlockUtils.indexBlockOffloadKey(ledgerId, uid)));
    @@ -611,7 +619,8 @@ public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
     
             return promise.whenComplete((__, t) -> {
                 if (null != this.ml) {
    -                this.offloaderStats.recordDeleteOffloadOps(this.ml.getName(), t == null);
    +                this.offloaderStats.recordDeleteOffloadOps(
    +                  TopicName.fromPersistenceNamingEncoding(this.ml.getName()), t == null);
                 }
             });
         }
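
The delete path above now resolves the blob store inside the scheduled task and records the delete-offload stat, keyed by the decoded topic name, in a `whenComplete` callback. A minimal sketch of that shape, with `removeBlobs` and `recordDeleteOffloadOps` as assumed single-method interfaces rather than the real jclouds/stats APIs:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncDeleteSketch {
    interface Blobs {
        void removeBlobs(String bucket, String key) throws Exception;
    }

    interface Stats {
        void recordDeleteOffloadOps(String topicName, boolean succeeded);
    }

    private final ExecutorService scheduler = Executors.newSingleThreadExecutor();

    public CompletableFuture<Void> deleteOffloaded(Blobs blobs, Stats stats,
                                                   String bucket, String key, String topicName) {
        CompletableFuture<Void> promise = new CompletableFuture<>();
        scheduler.execute(() -> {
            try {
                blobs.removeBlobs(bucket, key);     // blocking call stays off the caller thread
                promise.complete(null);
            } catch (Exception e) {
                promise.completeExceptionally(e);
            }
        });
        // Record success or failure exactly once, whichever way the promise completes.
        return promise.whenComplete((ignore, t) -> stats.recordDeleteOffloadOps(topicName, t == null));
    }
}
```
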
    @@ -620,11 +629,11 @@ public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
         public CompletableFuture<Void> deleteOffloaded(UUID uid, Map<String, String> offloadDriverMetadata) {
             BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
             String readBucket = bsKey.getBucket(offloadDriverMetadata);
    -        BlobStore readBlobstore = blobStores.get(config.getBlobStoreLocation());
     
             CompletableFuture<Void> promise = new CompletableFuture<>();
             scheduler.execute(() -> {
                 try {
    +                BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
                     readBlobstore.removeBlobs(readBucket,
                             ImmutableList.of(uid.toString(),
                                     DataBlockUtils.indexBlockOffloadKey(uid)));
    @@ -636,7 +645,8 @@ public CompletableFuture<Void> deleteOffloaded(UUID uid, Map<String, String> off
             });
     
             return promise.whenComplete((__, t) ->
    -                this.offloaderStats.recordDeleteOffloadOps(this.ml.getName(), t == null));
    +                this.offloaderStats.recordDeleteOffloadOps(
    +                  TopicName.fromPersistenceNamingEncoding(this.ml.getName()), t == null));
         }
     
         @Override
    @@ -663,7 +673,7 @@ public void scanLedgers(OffloadedLedgerMetadataConsumer consumer, Map<String, S
    diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java
    --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java
    +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java
         private List<ByteBuf> entriesByteBuf = null;
         private LedgerOffloaderStats offloaderStats;
    +    private String managedLedgerName;
         private String topicName;
         private int currentOffset = 0;
         private final AtomicBoolean close = new AtomicBoolean(false);
    @@ -91,7 +93,8 @@ public BlockAwareSegmentInputStreamImpl(ReadHandle ledger, long startEntryId, in
                                                 LedgerOffloaderStats offloaderStats, String ledgerName) {
             this(ledger, startEntryId, blockSize);
             this.offloaderStats = offloaderStats;
    -        this.topicName = ledgerName;
    +        this.managedLedgerName = ledgerName;
    +        this.topicName = TopicName.fromPersistenceNamingEncoding(ledgerName);
         }
     
         private ByteBuf readEntries(int len) throws IOException {
    @@ -183,7 +186,7 @@ private List<ByteBuf> readNextEntriesFromLedger(long start, long maxNumberEntrie
                     log.debug("read ledger entries. start: {}, end: {} cost {}", start, end,
                             TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - startTime));
                 }
    -            if (offloaderStats != null && topicName != null) {
    +            if (offloaderStats != null && managedLedgerName != null) {
                     offloaderStats.recordReadLedgerLatency(topicName, System.nanoTime() - startTime,
                             TimeUnit.NANOSECONDS);
                 }
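
The block-stream hunk above keeps both names around: the raw managed ledger name guards whether a latency sample should be recorded at all, and the decoded topic name is used as the metric label. A small sketch of that timing pattern, with `Stats` as a hypothetical stand-in for the offloader stats interface:

```java
import java.util.concurrent.TimeUnit;

public class ReadLatencySketch {

    interface Stats {
        void recordReadLedgerLatency(String topicName, long latency, TimeUnit unit);
    }

    static void timedRead(Runnable readFromLedger, Stats stats,
                          String managedLedgerName, String topicName) {
        long startTime = System.nanoTime();
        readFromLedger.run();
        // Mirror the null checks in the patch: only record when stats are wired up
        // and the ledger name is known.
        if (stats != null && managedLedgerName != null) {
            stats.recordReadLedgerLatency(topicName, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
        }
    }
}
```
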
    diff --git a/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml b/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml
    index a8a9b9aae925f..c6ad70823a5ac 100644
    --- a/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml
    +++ b/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml
    @@ -1,3 +1,4 @@
    +
     
    -
    +
    diff --git a/tiered-storage/pom.xml b/tiered-storage/pom.xml
    --- a/tiered-storage/pom.xml
    +++ b/tiered-storage/pom.xml
       <modelVersion>4.0.0</modelVersion>
       <packaging>pom</packaging>
       <parent>
    -    <groupId>org.apache.pulsar</groupId>
    +    <groupId>io.streamnative</groupId>
         <artifactId>pulsar</artifactId>
         <version>3.0.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
       </parent>
    -
       <artifactId>tiered-storage-parent</artifactId>
       <name>Apache Pulsar :: Tiered Storage :: Parent</name>
    -
       <properties>
         ${project.version}
       </properties>
    -
       <modules>
         <module>jcloud</module>
         <module>file-system</module>
    diff --git a/wiki/proposals/PIP.md b/wiki/proposals/PIP.md
    index e10452b107fd8..fc68905726eb8 100644
    --- a/wiki/proposals/PIP.md
    +++ b/wiki/proposals/PIP.md
    @@ -80,7 +80,7 @@ The process works in the following way:
     1. The author(s) of the proposal will create a GitHub issue ticket choosing the
        template for PIP proposals. The issue title should be "PIP-xxx: title", where
        the "xxx" number should be chosen to be the next number from the existing PIP 
    -   issues, listed [here]([url](https://github.com/apache/pulsar/labels/PIP)).
    +   issues, listed [here](https://github.com/apache/pulsar/issues?q=is%3Aissue+label%3APIP+)
     2. The author(s) will send a note to the dev@pulsar.apache.org mailing list
        to start the discussion, using subject prefix `[DISCUSS] PIP-xxx: {PIP TITLE}`. The discussion
        need to happen in the mailing list. Please avoid discussing it using
    @@ -116,38 +116,5 @@ Some examples:
     
     ## Template for a PIP design doc
     
    -```
    -## Motivation
    +Read [the template file](/.github/ISSUE_TEMPLATE/pip.md).
     
    -Explain why this change is needed, what benefits it would bring to Apache Pulsar
    -and what problem it's trying to solve.
    -
    -## Goal
    -
    -Define the scope of this proposal. Given the motivation stated above, what are
    -the problems that this proposal is addressing and what other items will be
    -considering out of scope, perhaps to be left to a different PIP.
    -
    -## API Changes
    -
    -Illustrate all the proposed changes to the API or wire protocol, with examples
    -of all the newly added classes/methods, including Javadoc.
    -
    -## Implementation
    -
    -This should be a detailed description of all the changes that are
    -expected to be made. It should be detailed enough that any developer that is
    -familiar with Pulsar internals would be able to understand all the parts of the
    -code changes for this proposal.
    -
    -This should also serve as documentation for any person that is trying to
    -understand or debug the behavior of a certain feature.
    -
    -
    -## Reject Alternatives
    -
    -If there are alternatives that were already considered by the authors or,
    -after the discussion, by the community, and were rejected, please list them
    -here along with the reason why they were rejected.
    -
    -```