diff --git a/.github/workflows/flow-build-application.yaml b/.github/workflows/flow-build-application.yaml index 3e1bc2386..5a90a28bc 100644 --- a/.github/workflows/flow-build-application.yaml +++ b/.github/workflows/flow-build-application.yaml @@ -91,7 +91,7 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} - e2e-node-pem-stop-add-tests: + e2e-node-pem-stop-tests: name: E2E Tests if: ${{ github.event_name == 'push' || github.event.inputs.enable-e2e-tests == 'true' }} uses: ./.github/workflows/zxc-e2e-test.yaml @@ -99,12 +99,12 @@ jobs: - env-vars - code-style with: - custom-job-label: Node PEM Stop Add - npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} + custom-job-label: Node PEM Stop + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} - e2e-node-pfx-kill-add-tests: + e2e-node-pfx-kill-tests: name: E2E Tests if: ${{ github.event_name == 'push' || github.event.inputs.enable-e2e-tests == 'true' }} uses: ./.github/workflows/zxc-e2e-test.yaml @@ -112,10 +112,10 @@ jobs: - env-vars - code-style with: - custom-job-label: Node PFX Kill Add - npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} - coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} - coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} + custom-job-label: Node PFX Kill + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-tests: name: E2E Tests @@ -129,6 +129,18 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} + e2e-node-add-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Add + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-relay-tests: name: E2E Tests if: ${{ github.event_name == 'push' || github.event.inputs.enable-e2e-tests == 'true' }} @@ -151,9 +163,10 @@ jobs: - unit-tests - e2e-tests - e2e-mirror-node-tests - - e2e-node-pem-stop-add-tests - - e2e-node-pfx-kill-add-tests + - e2e-node-pem-stop-tests + - e2e-node-pfx-kill-tests - e2e-node-local-build-tests + - e2e-node-add-tests - e2e-relay-tests if: ${{ (github.event_name == 'push' || github.event.inputs.enable-unit-tests == 'true' || github.event.inputs.enable-e2e-tests == 'true') && !failure() && !cancelled() }} with: @@ -164,15 +177,17 @@ jobs: enable-e2e-coverage-report: ${{ github.event_name == 'push' || github.event.inputs.enable-e2e-tests == 'true' }} e2e-test-subdir: ${{ 
needs.env-vars.outputs.e2e-test-subdir }} e2e-mirror-node-test-subdir: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} - e2e-node-pem-stop-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - e2e-node-pfx-kill-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} + e2e-node-pem-stop-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} + e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} - e2e-node-pem-stop-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} - e2e-node-pfx-kill-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} + e2e-node-pem-stop-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} + e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} + e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: snyk-token: ${{ secrets.SNYK_TOKEN }} diff --git a/.github/workflows/flow-pull-request-checks.yaml b/.github/workflows/flow-pull-request-checks.yaml index dc9ef43f0..c6079ef48 100644 --- a/.github/workflows/flow-pull-request-checks.yaml +++ b/.github/workflows/flow-pull-request-checks.yaml @@ -78,29 +78,29 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} - e2e-node-pem-stop-add-tests: + e2e-node-pem-stop-tests: name: E2E Tests uses: ./.github/workflows/zxc-e2e-test.yaml needs: - env-vars - code-style with: - custom-job-label: Node PEM Stop Add - npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} + custom-job-label: Node PEM Stop + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} - e2e-node-pfx-kill-add-tests: + e2e-node-pfx-kill-tests: name: E2E Tests uses: ./.github/workflows/zxc-e2e-test.yaml needs: - env-vars - code-style with: - custom-job-label: Node PFX Kill Add - npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} - coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} - coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} + custom-job-label: Node PFX Kill + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} + 
coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-tests: name: E2E Tests @@ -114,6 +114,18 @@ jobs: coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} + e2e-node-add-tests: + name: E2E Tests + uses: ./.github/workflows/zxc-e2e-test.yaml + needs: + - env-vars + - code-style + with: + custom-job-label: Node Add + npm-test-script: test-${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + coverage-subdirectory: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} + coverage-report-name: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} + e2e-relay-tests: name: E2E Tests if: ${{ !cancelled() && always() }} @@ -136,9 +148,10 @@ jobs: - unit-tests - e2e-tests - e2e-mirror-node-tests - - e2e-node-pem-stop-add-tests - - e2e-node-pfx-kill-add-tests + - e2e-node-pem-stop-tests + - e2e-node-pfx-kill-tests - e2e-node-local-build-tests + - e2e-node-add-tests - e2e-relay-tests if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name }} with: @@ -147,15 +160,17 @@ jobs: enable-e2e-coverage-report: true e2e-test-subdir: ${{ needs.env-vars.outputs.e2e-test-subdir }} e2e-mirror-node-test-subdir: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} - e2e-node-pem-stop-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - e2e-node-pfx-kill-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} + e2e-node-pem-stop-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} + e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} - e2e-node-pem-stop-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} - e2e-node-pfx-kill-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} + e2e-node-pem-stop-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} + e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} + e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: codecov-token: ${{ secrets.CODECOV_TOKEN }} @@ -168,9 +183,10 @@ jobs: - unit-tests - e2e-tests - e2e-mirror-node-tests - - e2e-node-pem-stop-add-tests - - e2e-node-pfx-kill-add-tests + - e2e-node-pem-stop-tests + - e2e-node-pfx-kill-tests - e2e-node-local-build-tests + - e2e-node-add-tests - e2e-relay-tests if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name }} with: @@ -179,15 +195,17 @@ jobs: enable-e2e-coverage-report: true e2e-test-subdir: ${{ needs.env-vars.outputs.e2e-test-subdir }} e2e-mirror-node-test-subdir: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} - 
e2e-node-pem-stop-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} - e2e-node-pfx-kill-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} + e2e-node-pem-stop-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} + e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} + e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} - e2e-node-pem-stop-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} - e2e-node-pfx-kill-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} + e2e-node-pem-stop-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} + e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} e2e-node-local-build-coverage-report: ${{ needs.env-vars.outputs.e2e-node-local-build-coverage-report }} + e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} secrets: codacy-project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} @@ -200,8 +218,10 @@ jobs: # - unit-tests # - e2e-tests # - e2e-mirror-node-tests -# - e2e-node-pem-stop-add-tests -# - e2e-node-pfx-kill-add-tests +# - e2e-node-pem-stop-tests +# - e2e-node-pfx-kill-tests +# - e2e-node-local-build-tests +# - e2e-node-add-tests # - e2e-relay-tests # if: ${{ github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name && github.actor != 'dependabot[bot]' }} # with: @@ -209,14 +229,17 @@ jobs: # enable-snyk-scan: true # e2e-test-subdir: ${{ needs.env-vars.outputs.e2e-test-subdir }} # e2e-mirror-node-test-subdir: ${{ needs.env-vars.outputs.e2e-mirror-node-test-subdir }} -# e2e-node-pem-stop-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-test-subdir }} -# e2e-node-pfx-kill-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-test-subdir }} +# e2e-node-pem-stop-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pem-stop-test-subdir }} +# e2e-node-pfx-kill-test-subdir: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-test-subdir }} +# e2e-node-local-build-test-subdir: ${{ needs.env-vars.outputs.e2e-node-local-build-test-subdir }} +# e2e-node-add-test-subdir: ${{ needs.env-vars.outputs.e2e-node-add-test-subdir }} # e2e-relay-test-subdir: ${{ needs.env-vars.outputs.e2e-relay-test-subdir }} # e2e-coverage-report: ${{ needs.env-vars.outputs.e2e-coverage-report }} # e2e-mirror-node-coverage-report: ${{ needs.env-vars.outputs.e2e-mirror-node-coverage-report }} -# e2e-node-pem-stop-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-add-coverage-report }} -# e2e-node-pfx-kill-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-add-coverage-report }} +# e2e-node-pem-stop-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pem-stop-coverage-report }} +# e2e-node-pfx-kill-coverage-report: ${{ needs.env-vars.outputs.e2e-node-pfx-kill-coverage-report }} # e2e-node-local-build-coverage-report: ${{ 
needs.env-vars.outputs.e2e-node-local-build-coverage-report }} +# e2e-node-add-coverage-report: ${{ needs.env-vars.outputs.e2e-node-add-coverage-report }} # e2e-relay-coverage-report: ${{ needs.env-vars.outputs.e2e-relay-coverage-report }} # secrets: # snyk-token: ${{ secrets.SNYK_TOKEN }} diff --git a/.github/workflows/zxc-code-analysis.yaml b/.github/workflows/zxc-code-analysis.yaml index 5033009f5..23877e7f6 100644 --- a/.github/workflows/zxc-code-analysis.yaml +++ b/.github/workflows/zxc-code-analysis.yaml @@ -65,21 +65,26 @@ on: type: string required: false default: "e2e-mirror-node" - e2e-node-pem-stop-add-test-subdir: - description: "E2E Node PEM Stop Add Test Subdirectory:" + e2e-node-pem-stop-test-subdir: + description: "E2E Node PEM Stop Test Subdirectory:" type: string required: false - default: "e2e-node-pem-stop-add" - e2e-node-pfx-kill-add-test-subdir: - description: "E2E Node PFX Kill Add Test Subdirectory:" + default: "e2e-node-pem-stop" + e2e-node-pfx-kill-test-subdir: + description: "E2E Node PFX Kill Test Subdirectory:" type: string required: false - default: "e2e-node-pfx-kill-add" + default: "e2e-node-pfx-kill" e2e-node-local-build-test-subdir: description: "E2E Node Local Build Test Subdirectory:" type: string required: false default: "e2e-node-local-build" + e2e-node-add-test-subdir: + description: "E2E Node Add Test Subdirectory:" + type: string + required: false + default: "e2e-node-add" e2e-relay-test-subdir: description: "E2E Relay Test Subdirectory:" type: string @@ -95,21 +100,26 @@ on: type: string required: false default: "E2E Mirror Node Tests Coverage Report" - e2e-node-pem-stop-add-coverage-report: - description: "E2E Node PEM Stop Add Coverage Report:" + e2e-node-pem-stop-coverage-report: + description: "E2E Node PEM Stop Coverage Report:" type: string required: false - default: "E2E Node PEM Stop Add Tests Coverage Report" - e2e-node-pfx-kill-add-coverage-report: - description: "E2E Node PFX Kill Add Coverage Report:" + default: "E2E Node PEM Stop Tests Coverage Report" + e2e-node-pfx-kill-coverage-report: + description: "E2E Node PFX Kill Coverage Report:" type: string required: false - default: "E2E Node PFX Kill Add Tests Coverage Report" + default: "E2E Node PFX Kill Tests Coverage Report" e2e-node-local-build-coverage-report: description: "E2E Node Local Build Coverage Report:" type: string required: false default: "E2E Node Local Build Tests Coverage Report" + e2e-node-add-coverage-report: + description: "E2E Node Add Coverage Report:" + type: string + required: false + default: "E2E Node Add Tests Coverage Report" e2e-relay-coverage-report: description: "E2E Relay Coverage Report:" type: string @@ -179,19 +189,19 @@ jobs: name: ${{ inputs.e2e-mirror-node-coverage-report }} path: 'coverage/${{ inputs.e2e-mirror-node-test-subdir }}' - - name: Download E2E Node PEM Stop Add Coverage Report + - name: Download E2E Node PEM Stop Coverage Report uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} with: - name: ${{ inputs.e2e-node-pem-stop-add-coverage-report }} - path: 'coverage/${{ inputs.e2e-node-pem-stop-add-test-subdir }}' + name: ${{ inputs.e2e-node-pem-stop-coverage-report }} + path: 'coverage/${{ inputs.e2e-node-pem-stop-test-subdir }}' - - name: Download E2E Node PFX Kill Add Coverage Report + - name: Download E2E Node PFX Kill Coverage Report uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} with: - name: ${{ inputs.e2e-node-pfx-kill-add-coverage-report }} - path: 'coverage/${{ inputs.e2e-node-pfx-kill-add-test-subdir }}' + name: ${{ inputs.e2e-node-pfx-kill-coverage-report }} + path: 'coverage/${{ inputs.e2e-node-pfx-kill-test-subdir }}' - name: Download E2E Relay Coverage Report uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -204,9 +214,16 @@ jobs: uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} with: - name: ${{ inputs.e2e-node-local-build-test-coverage-report }} + name: ${{ inputs.e2e-node-local-build-coverage-report }} path: 'coverage/${{ inputs.e2e-node-local-build-test-subdir }}' + - name: Download E2E Add Coverage Report + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + if: ${{ (inputs.enable-codecov-analysis || inputs.enable-codacy-coverage) && inputs.enable-e2e-coverage-report && !cancelled() && !failure() }} + with: + name: ${{ inputs.e2e-node-add-coverage-report }} + path: 'coverage/${{ inputs.e2e-node-add-test-subdir }}' + - name: Publish To Codecov uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 if: ${{ inputs.enable-codecov-analysis && !cancelled() && !failure() }} diff --git a/.github/workflows/zxc-env-vars.yaml b/.github/workflows/zxc-env-vars.yaml index 45cea6797..6647e240f 100644 --- a/.github/workflows/zxc-env-vars.yaml +++ b/.github/workflows/zxc-env-vars.yaml @@ -32,15 +32,18 @@ on: e2e-mirror-node-test-subdir: description: "E2E Mirror Node Test Subdirectory" value: ${{ jobs.env-vars.outputs.e2e_mirror_node_test_subdir }} - e2e-node-pem-stop-add-test-subdir: - description: "E2E Node PEM Stop Add Test Subdirectory" - value: ${{ jobs.env-vars.outputs.e2e_node_pem_stop_add_test_subdir }} - e2e-node-pfx-kill-add-test-subdir: - description: "E2E Node PFX Kill Add Test Subdirectory" - value: ${{ jobs.env-vars.outputs.e2e_node_pfx_kill_add_test_subdir }} + e2e-node-pem-stop-test-subdir: + description: "E2E Node PEM Stop Test Subdirectory" + value: ${{ jobs.env-vars.outputs.e2e_node_pem_stop_test_subdir }} + e2e-node-pfx-kill-test-subdir: + description: "E2E Node PFX Kill Test Subdirectory" + value: ${{ jobs.env-vars.outputs.e2e_node_pfx_kill_test_subdir }} e2e-node-local-build-test-subdir: description: "E2E Node Local Build Test Subdirectory" value: ${{ jobs.env-vars.outputs.e2e_node_local_build_test_subdir }} + e2e-node-add-test-subdir: + description: "E2E Node Add Test Subdirectory" + value: ${{ jobs.env-vars.outputs.e2e_node_add_test_subdir }} e2e-relay-test-subdir: description: "E2E Relay Test Subdirectory" value: ${{ jobs.env-vars.outputs.e2e_relay_test_subdir }} @@ -50,15 +53,18 @@ on: e2e-mirror-node-coverage-report: description: "E2E Mirror Node Tests Coverage Report" value: ${{ jobs.env-vars.outputs.e2e_mirror_node_coverage_report }} - e2e-node-pem-stop-add-coverage-report: - description: "E2E Node PEM Stop Add Tests Coverage Report" - value: ${{ jobs.env-vars.outputs.e2e_node_pem_stop_add_coverage_report }} - e2e-node-pfx-kill-add-coverage-report: - description: "E2E Node PFX Kill Add Tests Coverage Report" - value: ${{ 
jobs.env-vars.outputs.e2e_node_pfx_kill_add_coverage_report }} + e2e-node-pem-stop-coverage-report: + description: "E2E Node PEM Stop Tests Coverage Report" + value: ${{ jobs.env-vars.outputs.e2e_node_pem_stop_coverage_report }} + e2e-node-pfx-kill-coverage-report: + description: "E2E Node PFX Kill Tests Coverage Report" + value: ${{ jobs.env-vars.outputs.e2e_node_pfx_kill_coverage_report }} e2e-node-local-build-coverage-report: description: "E2E Node Local Build Tests Coverage Report" value: ${{ jobs.env-vars.outputs.e2e_node_local_build_coverage_report }} + e2e-node-add-coverage-report: + description: "E2E Node Add Tests Coverage Report" + value: ${{ jobs.env-vars.outputs.e2e_node_add_coverage_report }} e2e-relay-coverage-report: description: "E2E Relay Tests Coverage Report" value: ${{ jobs.env-vars.outputs.e2e_relay_coverage_report }} @@ -74,15 +80,17 @@ jobs: outputs: e2e_test_subdir: e2e e2e_mirror_node_test_subdir: e2e-mirror-node - e2e_node_pem_stop_add_test_subdir: e2e-node-pem-stop-add - e2e_node_pfx_kill_add_test_subdir: e2e-node-pfx-kill-add + e2e_node_pem_stop_test_subdir: e2e-node-pem-stop + e2e_node_pfx_kill_test_subdir: e2e-node-pfx-kill e2e_node_local_build_test_subdir: e2e-node-local-build + e2e_node_add_test_subdir: e2e-node-add e2e_relay_test_subdir: e2e-relay e2e_coverage_report: "E2E Tests Coverage Report" e2e_mirror_node_coverage_report: "E2E Mirror Node Tests Coverage Report" - e2e_node_pem_stop_add_coverage_report: "E2E Node PEM Stop Add Tests Coverage Report" - e2e_node_pfx_kill_add_coverage_report: "E2E Node PFX Kill Add Tests Coverage Report" + e2e_node_pem_stop_coverage_report: "E2E Node PEM Stop Tests Coverage Report" + e2e_node_pfx_kill_coverage_report: "E2E Node PFX Kill Tests Coverage Report" e2e_node_local_build_coverage_report: "E2E Node Local Build Tests Coverage Report" + e2e_node_add_coverage_report: "E2E Node Add Tests Coverage Report" e2e_relay_coverage_report: "E2E Relay Tests Coverage Report" steps: - run: echo "Exposing environment variables to reusable workflows" diff --git a/README.md b/README.md index 30b7ebb5c..2da0578d5 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,7 @@ Then run the following command to set the kubectl context to the new cluster: ```bash kind create cluster -n "${SOLO_CLUSTER_NAME}" ``` + Example output ``` @@ -184,6 +185,7 @@ Kubernetes Namespace : solo ✔ Generate gRPC TLS keys ✔ Finalize ``` + Key files are generated in `~/.solo/keys` directory. ``` @@ -192,6 +194,7 @@ $ ls ~/.solo/cache/keys hedera-node0.crt hedera-node1.crt hedera-node2.crt private-node0.pfx private-node2.pfx hedera-node0.key hedera-node1.key hedera-node2.key private-node1.pfx public.pfx ``` + * Setup cluster with shared components * In a separate terminal, you may run `k9s` to view the pod status. @@ -214,7 +217,6 @@ Kubernetes Namespace : solo ✔ Install 'fullstack-cluster-setup' chart ``` - * Deploy helm chart with Hedera network components * It may take a while (5~15 minutes depending on your internet speed) to download various docker images and get the pods started. * If it fails, ensure you have enough resources allocated for Docker engine and retry the command. @@ -334,6 +336,7 @@ Kubernetes Namespace : solo ✔ Check proxy for node: node0 ✔ Check node proxies are ACTIVE ``` + * Deploy mirror node ``` @@ -518,7 +521,9 @@ Kubernetes Namespace : solo ✔ Generate gRPC TLS keys ✔ Finalize ``` + PEM key files are generated in `~/.solo/keys` directory. 
+ ``` $ ls ~/.solo/cache/keys a-private-node0.pem a-public-node1.pem hedera-node1.crt s-private-node0.pem s-public-node1.pem @@ -526,6 +531,7 @@ a-private-node1.pem a-public-node2.pem hedera-node1.key s-private-node1.pem a-private-node2.pem hedera-node0.crt hedera-node2.crt s-private-node2.pem a-public-node0.pem hedera-node0.key hedera-node2.key s-public-node0.pem ``` + * Setup cluster with shared components ``` @@ -561,16 +567,18 @@ $ solo node start # output is similar to example-1 ``` + ## For Developers Working on Hedera Service Repo First, pleaes clone hedera service repo `https://github.com/hashgraph/hedera-services/` and build the code with `./gradlew assemble`. If need to running nodes with different versions or releases, please duplicate the repo or build directories in -multiple directories, checkout to the respective version and build the code. +multiple directories, checkout to the respective version and build the code. To set customized `settings.txt` file, edit the file `~/.solo/cache/templates/settings.txt` after `solo init` command. Then you can start customized built hedera network with the following command: + ``` solo node setup --local-build-path ,node1=,node2= ``` @@ -578,16 +586,18 @@ solo node setup --local-build-path ,node1=,node1=,node2= --app PlatformTestingTool.jar --app-config ``` + ## Logs + You can find log for running solo command under the directory `~/.solo/logs/` -The file `solo.log` contains the logs for the solo command. +The file `solo.log` contains the logs for the solo command. The file `hashgraph-sdk.log` contains the logs from solo client when sending transactions to network nodes. - ## Support If you have a question on how to use the product, please see our [support guide](https://github.com/hashgraph/.github/blob/main/SUPPORT.md). diff --git a/docs/content/_index.md b/docs/content/_index.md index c63a844f7..13e1d34c5 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -1,13 +1,17 @@ ---- +*** + title: Welcome to Solo Documentation geekdocNav: true geekdocAlign: center geekdocAnchor: false geekdocDescription: Home page for Solo Documentation ---- + +*** + + [![NPM Version](https://img.shields.io/npm/v/%40hashgraph%2Fsolo?logo=npm)](https://www.npmjs.com/package/@hashgraph/solo) @@ -21,8 +25,7 @@ Solo is an opinionated CLI tool to deploy and manage standalone test networks. {{< button size="large" relref="getting-started/installation.md" >}}Getting Started{{< /button >}} -Feature overview ------------------------------ +## Feature overview {{< columns >}} @@ -33,6 +36,7 @@ Stay focused on deployment and don't get overwhelmed by a complex design. {{< /columns >}} {{< columns >}} + ### Easy configuration Getting started in minutes. Solo comes with easy to use configuration. 
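For context on the `--local-build-path` flag shown in the README hunk above: it accepts a default build directory, optionally followed by comma-separated `nodeId=path` overrides (the README's own example elides the actual values). A minimal illustrative sketch of that workflow — every path below is hypothetical, not taken from this change:

```bash
# Build the platform code once per version under test (hypothetical checkout locations)
cd ../hedera-services && ./gradlew assemble && cd -

# One default build path for every node, plus per-node overrides as nodeId=path pairs
solo node setup --local-build-path \
  ../hedera-services/hedera-node/data,node1=../hs-a/hedera-node/data,node2=../hs-b/hedera-node/data
```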
diff --git a/docs/content/contribution/contribution.md b/docs/content/contribution/contribution.md index d7b9166bf..df86715ed 100644 --- a/docs/content/contribution/contribution.md +++ b/docs/content/contribution/contribution.md @@ -1,6 +1,7 @@ ---- +*** + title: Solo Contribution weight: -20 geekdocNav: true geekdocAlign: center ---- +-------------------- diff --git a/docs/content/contribution/docs.md b/docs/content/contribution/docs.md index 5cc496695..1f02651bb 100644 --- a/docs/content/contribution/docs.md +++ b/docs/content/contribution/docs.md @@ -1,6 +1,7 @@ ---- +*** + title: Docs Contribution weight: -20 geekdocNav: true geekdocAlign: center ---- +-------------------- diff --git a/docs/content/getting-started/deploy.md b/docs/content/getting-started/deploy.md index 7185be62f..e0c528189 100644 --- a/docs/content/getting-started/deploy.md +++ b/docs/content/getting-started/deploy.md @@ -1,10 +1,11 @@ ---- +*** + title: Deploy weight: -20 geekdocNav: true geekdocAlign: center geekdocAnchor: false ---- +-------------------- ### Example - 1: Deploy a standalone test network (version `0.42.5`) diff --git a/docs/content/getting-started/installation.md b/docs/content/getting-started/installation.md index 957a68cf1..d9c08b923 100644 --- a/docs/content/getting-started/installation.md +++ b/docs/content/getting-started/installation.md @@ -1,14 +1,15 @@ ---- +*** + title: Installation weight: -20 geekdocNav: true geekdocAlign: center geekdocAnchor: false ---- +-------------------- ### Requirements -Node(>=20.14.0) (_lts/hydrogen_) +Node(>=20.14.0) (*lts/hydrogen*) ### Setup diff --git a/docs/content/getting-started/setup.md b/docs/content/getting-started/setup.md index f6101b042..7e6bb1edc 100644 --- a/docs/content/getting-started/setup.md +++ b/docs/content/getting-started/setup.md @@ -1,10 +1,11 @@ ---- +*** + title: Setup weight: -20 geekdocNav: true geekdocAlign: center geekdocAnchor: false ---- +-------------------- ### Remote cluster diff --git a/package-lock.json b/package-lock.json index 8712c2415..a732b4996 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,8 +14,8 @@ "win32" ], "dependencies": { - "@hashgraph/proto": "^2.15.0", - "@hashgraph/sdk": "^2.49.2", + "@hashgraph/proto": "^2.15.0-beta.3", + "@hashgraph/sdk": "^2.50.0-beta.3", "@kubernetes/client-node": "^0.21.0", "@listr2/prompt-adapter-enquirer": "^2.0.11", "@peculiar/x509": "^1.12.1", @@ -1166,9 +1166,9 @@ "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" }, "node_modules/@hashgraph/cryptography": { - "version": "1.4.8-beta.5", - "resolved": "https://registry.npmjs.org/@hashgraph/cryptography/-/cryptography-1.4.8-beta.5.tgz", - "integrity": "sha512-soq2vGLRkdl2Evr+gIvIjCXJjqA1hOAjysBGG+dhP6tKx2PEgEjb3hON/sMbxm3Q4qQdkML/vEthdAV707+flw==", + "version": "1.4.8-beta.6", + "resolved": "https://registry.npmjs.org/@hashgraph/cryptography/-/cryptography-1.4.8-beta.6.tgz", + "integrity": "sha512-FR8uG5XpLj/rFpEBNEhqB/988spnblAiCSJEOVxfc8X8AH1dVH3kWF5/4sI9fmbviySM7rgoXNdcM0mRZvXMJw==", "dependencies": { "asn1js": "^3.0.5", "bignumber.js": "^9.1.1", @@ -1202,33 +1202,10 @@ } } }, - "node_modules/@hashgraph/cryptography/node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - 
"url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "node_modules/@hashgraph/proto": { - "version": "2.15.0", - "resolved": "https://registry.npmjs.org/@hashgraph/proto/-/proto-2.15.0.tgz", - "integrity": "sha512-ULSNIwQZIroTssrEfNoUcIcWEJ9BIwKZiAsaRvJ2+Rr3XIr+np7UXv6sEkJU+jSyzk97LrTdiRAoc/hJO9Vx8Q==", + "version": "2.15.0-beta.3", + "resolved": "https://registry.npmjs.org/@hashgraph/proto/-/proto-2.15.0-beta.3.tgz", + "integrity": "sha512-/95cydqBQRaO1gagBOenNpcfJIcnRx+vzefhuzSFNp4pfl0AM3oXau39hM2raKLhFHoKZysSjkXkMp24AaCE5w==", "dependencies": { "long": "^4.0.0", "protobufjs": "^7.2.5" @@ -1238,16 +1215,16 @@ } }, "node_modules/@hashgraph/sdk": { - "version": "2.49.2", - "resolved": "https://registry.npmjs.org/@hashgraph/sdk/-/sdk-2.49.2.tgz", - "integrity": "sha512-HqESeH6gF/QEm69qmEyPZ40i9w2jBXsyXFqT/kRsrb7yEyCdVrG1Wnt88HxOSP9XyOsm3hj4OBTEiy2sI+kl+A==", + "version": "2.50.0-beta.3", + "resolved": "https://registry.npmjs.org/@hashgraph/sdk/-/sdk-2.50.0-beta.3.tgz", + "integrity": "sha512-0mGnzbeEl7e3UqlCDmXLIm/jUJNF+VNhuxaqq6wSxZe8fau7TXkOjat5nP4XUcYOTgEHJ4rWeO+WEukMDL2h3w==", "dependencies": { "@ethersproject/abi": "^5.7.0", "@ethersproject/bignumber": "^5.7.0", "@ethersproject/bytes": "^5.7.0", "@ethersproject/rlp": "^5.7.0", "@grpc/grpc-js": "1.8.2", - "@hashgraph/cryptography": "1.4.8-beta.5", + "@hashgraph/cryptography": "1.4.8-beta.6", "@hashgraph/proto": "2.15.0-beta.3", "axios": "^1.6.4", "bignumber.js": "^9.1.1", @@ -1273,18 +1250,6 @@ } } }, - "node_modules/@hashgraph/sdk/node_modules/@hashgraph/proto": { - "version": "2.15.0-beta.3", - "resolved": "https://registry.npmjs.org/@hashgraph/proto/-/proto-2.15.0-beta.3.tgz", - "integrity": "sha512-/95cydqBQRaO1gagBOenNpcfJIcnRx+vzefhuzSFNp4pfl0AM3oXau39hM2raKLhFHoKZysSjkXkMp24AaCE5w==", - "dependencies": { - "long": "^4.0.0", - "protobufjs": "^7.2.5" - }, - "engines": { - "node": ">=10.0.0" - } - }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.14", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", @@ -3378,6 +3343,29 @@ "node-int64": "^0.4.0" } }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", @@ -9481,29 +9469,6 @@ "split2": "^4.0.0" } }, - "node_modules/pino-abstract-transport/node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, 
"node_modules/pino-abstract-transport/node_modules/readable-stream": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.4.2.tgz", @@ -9543,29 +9508,6 @@ "pino-pretty": "bin.js" } }, - "node_modules/pino-pretty/node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "node_modules/pino-pretty/node_modules/readable-stream": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.4.2.tgz", diff --git a/package.json b/package.json index 4532186c4..85b71ebbe 100644 --- a/package.json +++ b/package.json @@ -15,9 +15,10 @@ "test-e2e-all": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E All Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e' --testPathIgnorePatterns=\".*/unit/.*\"", "test-e2e": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e' --testPathIgnorePatterns=\".*/unit/.*\" --testPathIgnorePatterns=\".*/e2e/commands/mirror_node.*\" --testPathIgnorePatterns=\".*/e2e/commands/node.*\" --testPathIgnorePatterns=\".*/e2e/commands/relay.*\"", "test-e2e-mirror-node": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Mirror Node Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-mirror-node.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-mirror-node' --testRegex=\".*\\/e2e\\/commands\\/mirror_node\\.test\\.mjs\"", - "test-e2e-node-pem-stop-add": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PEM Stop Add Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pem-stop-add.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pem-stop-add' --testRegex=\".*\\/e2e\\/commands\\/node_pem_stop_add\\.test\\.mjs\"", - "test-e2e-node-pfx-kill-add": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PFX Kill Add Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pfx-kill-add.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pfx-kill-add' --testRegex=\".*\\/e2e\\/commands\\/node_pfx_kill_add\\.test\\.mjs\"", + "test-e2e-node-pem-stop": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PEM Stop Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pem-stop.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pem-stop' --testRegex=\".*\\/e2e\\/commands\\/node_pem_stop\\.test\\.mjs\"", + "test-e2e-node-pfx-kill": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node PFX Kill Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-pfx-kill.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-pfx-kill' --testRegex=\".*\\/e2e\\/commands\\/node_pfx_kill\\.test\\.mjs\"", 
"test-e2e-node-local-build": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Local Custom Build' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-local-build.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-local-build' --testRegex=\".*\\/e2e\\/commands\\/node-local.*\\.test\\.mjs\"", + "test-e2e-node-add": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Node Add Custom Build' JEST_JUNIT_OUTPUT_NAME='junit-e2e-node-add.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-node-add' --testRegex=\".*\\/e2e\\/commands\\/node-add.*\\.test\\.mjs\"", "test-e2e-relay": "NODE_OPTIONS=--experimental-vm-modules JEST_SUITE_NAME='Jest E2E Relay Tests' JEST_JUNIT_OUTPUT_NAME='junit-e2e-relay.xml' jest --runInBand --detectOpenHandles --forceExit --coverage --coverageDirectory='coverage/e2e-relay' --testRegex=\".*\\/e2e\\/commands\\/relay\\.test\\.mjs\"", "merge-clean": "rm -rf .nyc_output && mkdir .nyc_output && rm -rf coverage/lcov-report && rm -rf coverage/solo && rm coverage/*.*", "merge-e2e": "nyc merge ./coverage/e2e/ .nyc_output/coverage.json", @@ -37,8 +38,8 @@ "author": "Swirlds Labs", "license": "Apache2.0", "dependencies": { - "@hashgraph/proto": "^2.15.0", - "@hashgraph/sdk": "^2.49.2", + "@hashgraph/proto": "^2.15.0-beta.3", + "@hashgraph/sdk": "^2.50.0-beta.3", "@kubernetes/client-node": "^0.21.0", "@listr2/prompt-adapter-enquirer": "^2.0.11", "@peculiar/x509": "^1.12.1", diff --git a/resources/support-zip.sh b/resources/support-zip.sh new file mode 100644 index 000000000..56da3ab76 --- /dev/null +++ b/resources/support-zip.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# This script creates a zip file so that it can be copied out of the pod for research purposes + +readonly HAPI_DIR=/opt/hgcapp/services-hedera/HapiApp2.0 +readonly RESEARCH_ZIP=${HOSTNAME}.zip +readonly ZIP_FULLPATH=${HAPI_DIR}/${RESEARCH_ZIP} +readonly FILE_LIST=${HAPI_DIR}/support-zip-file-list.txt +readonly CONFIG_TXT=config.txt +readonly SETTINGS_TXT=settings.txt +readonly SETTINGS_USED_TXT=settingsUsed.txt +readonly OUTPUT_DIR=output +readonly DATA_DIR=data +readonly ADDRESS_BOOK_DIR=${DATA_DIR}/saved/address_book +readonly CONFIG_DIR=${DATA_DIR}/config +readonly KEYS_DIR=${DATA_DIR}/keys +readonly UPGRADE_DIR=${DATA_DIR}/upgrade +readonly JOURNAL_CTL_LOG=${OUTPUT_DIR}/journalctl.log + +AddToFileList() +{ + if [[ -d "${1}" ]];then + find "${1}" -name "*" -printf '\047%p\047\n' >>${FILE_LIST} + return + fi + + if [[ -f "${1}" ]];then + find . 
-maxdepth 1 -type f -name "${1}" -print >>${FILE_LIST} + else + echo "skipping: ${1}, file or directory not found" + fi +} + +cd ${HAPI_DIR} +echo -n > ${FILE_LIST} +journalctl > ${JOURNAL_CTL_LOG} +AddToFileList ${CONFIG_TXT} +AddToFileList ${SETTINGS_TXT} +AddToFileList ${SETTINGS_USED_TXT} +AddToFileList ${OUTPUT_DIR} +AddToFileList ${ADDRESS_BOOK_DIR} +AddToFileList ${CONFIG_DIR} +AddToFileList ${KEYS_DIR} +AddToFileList ${UPGRADE_DIR} +jar cvfM "${ZIP_FULLPATH}" "@${FILE_LIST}" diff --git a/resources/templates/application.properties b/resources/templates/application.properties index 09eef8f0d..baa214200 100644 --- a/resources/templates/application.properties +++ b/resources/templates/application.properties @@ -1,2 +1,15 @@ +ledger.id=0x01 +contracts.chainId=298 +hedera.recordStream.logPeriod=1 +balances.exportPeriodSecs=400 +files.maxSizeKb=2048 +hedera.recordStream.compressFilesOnCreation=true +balances.compressOnCreation=true +contracts.maxNumWithHapiSigsAccess=0 autoRenew.targetTypes= -hedera.config.version=0 \ No newline at end of file +hedera.config.version=0 +nodes.gossipFqdnRestricted=false +netty.mode=TEST +hedera.profiles.active=TEST +# TODO: this is a workaround until prepareUpgrade freeze will recalculate the weight prior to writing the config.txt +staking.periodMins=1 diff --git a/resources/templates/log4j2.xml b/resources/templates/log4j2.xml index cb70b0a88..1234fc7e0 100644 --- a/resources/templates/log4j2.xml +++ b/resources/templates/log4j2.xml @@ -12,6 +12,7 @@ %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p %-4L %c{1} - %m{nolookups}%n + @@ -35,6 +36,7 @@ %d{yyyy-MM-dd HH:mm:ss.SSS} %-8sn %-5p %-16marker <%t> %c{1}: %msg{nolookups}%n + @@ -91,7 +93,7 @@ - + diff --git a/resources/templates/settings.txt b/resources/templates/settings.txt index 7ecdc3ccf..3b34a834b 100644 --- a/resources/templates/settings.txt +++ b/resources/templates/settings.txt @@ -9,3 +9,6 @@ state.mainClassNameOverride, com.hedera.services.ServicesMain ############################# crypto.enableNewKeyStoreModel, true + +# TODO: remove this? only defaults to true when going from 0.52 to 0.53 +event.migrateEventHashing, false diff --git a/src/commands/flags.mjs b/src/commands/flags.mjs index 6955076b4..8c8c6b361 100644 --- a/src/commands/flags.mjs +++ b/src/commands/flags.mjs @@ -614,6 +614,47 @@ export const amount = { } } +/** @type {CommandFlag} **/ +export const nodeID = { + constName: 'nodeId', + name: 'node-id', + definition: { + describe: 'Node id (e.g. node99)', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const gossipEndpoints = { + constName: 'gossipEndpoints', + name: 'gossip-endpoints', + definition: { + describe: 'Comma separated gossip endpoints of the node(e.g. 
first one is internal, second one is external)', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const grpcEndpoints = { + constName: 'grpcEndpoints', + name: 'grpc-endpoints', + definition: { + describe: 'Comma separated gRPC endpoints of the node (at most 8)', + type: 'string' + } +} + +/** @type {CommandFlag} **/ +export const endpointType = { + constName: 'endpointType', + name: 'endpoint-type', + definition: { + describe: 'Endpoint type (IP or FQDN)', + defaultValue: constants.ENDPOINT_TYPE_FQDN, + type: 'string' + } +} + /** @type {CommandFlag[]} **/ export const allFlags = [ accountId, @@ -642,15 +683,19 @@ export const allFlags = [ ecdsaPrivateKey, enableHederaExplorerTls, enablePrometheusSvcMonitor, + endpointType, fstChartVersion, generateGossipKeys, generateTlsKeys, + gossipEndpoints, + grpcEndpoints, hederaExplorerTlsHostName, hederaExplorerTlsLoadBalancerIp, keyFormat, localBuildPath, log4j2Xml, namespace, + nodeID, nodeIDs, operatorId, operatorKey, diff --git a/src/commands/node.mjs b/src/commands/node.mjs index 84d77be91..5e3885a28 100644 --- a/src/commands/node.mjs +++ b/src/commands/node.mjs @@ -14,6 +14,7 @@ * limitations under the License. * */ +import * as x509 from '@peculiar/x509' import chalk from 'chalk' import * as fs from 'fs' import { readFile, writeFile } from 'fs/promises' @@ -21,22 +22,31 @@ import { Listr } from 'listr2' import path from 'path' import { FullstackTestingError, IllegalArgumentError } from '../core/errors.mjs' import * as helpers from '../core/helpers.mjs' -import { getNodeLogs, getTmpDir, sleep, validatePath } from '../core/helpers.mjs' -import { constants, Templates } from '../core/index.mjs' +import { getNodeAccountMap, getNodeLogs, getTmpDir, sleep, validatePath } from '../core/helpers.mjs' +import { constants, Templates, Zippy } from '../core/index.mjs' import { BaseCommand } from './base.mjs' import * as flags from './flags.mjs' import * as prompts from './prompts.mjs' + import { AccountBalanceQuery, - AccountId, - FileContentsQuery, - FileId, + AccountUpdateTransaction, + FileUpdateTransaction, + FileAppendTransaction, FreezeTransaction, FreezeType, - Timestamp + ServiceEndpoint, + Timestamp, + PrivateKey, + AccountId, + NodeCreateTransaction } from '@hashgraph/sdk' import * as crypto from 'crypto' -import { FREEZE_ADMIN_ACCOUNT } from '../core/constants.mjs' +import { + FREEZE_ADMIN_ACCOUNT, + HEDERA_NODE_DEFAULT_STAKE_AMOUNT, + TREASURY_ACCOUNT_ID +} from '../core/constants.mjs' /** * Defines the core functionalities of 'node' command @@ -131,14 +141,17 @@ export class NodeCommand extends BaseCommand { flags.chainId, flags.chartDirectory, flags.devMode, + flags.endpointType, flags.force, flags.fstChartVersion, flags.generateGossipKeys, flags.generateTlsKeys, + flags.gossipEndpoints, + flags.grpcEndpoints, flags.keyFormat, flags.log4j2Xml, flags.namespace, - flags.nodeIDs, + flags.nodeID, flags.releaseTag, flags.settingTxt ] @@ -159,6 +172,46 @@ export class NodeCommand extends BaseCommand { this._portForwards = [] } + async addStake (namespace, accountId, nodeId) { + try { + await this.accountManager.loadNodeClient(namespace) + const client = this.accountManager._nodeClient + const treasuryKey = await this.accountManager.getTreasuryAccountKeys(namespace) + const treasuryPrivateKey = PrivateKey.fromStringED25519(treasuryKey.privateKey) + client.setOperator(TREASURY_ACCOUNT_ID, treasuryPrivateKey) + + // get some initial balance + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 
HEDERA_NODE_DEFAULT_STAKE_AMOUNT + 1) + + // check balance + const balance = await new AccountBalanceQuery() + .setAccountId(accountId) + .execute(client) + this.logger.debug(`Account ${accountId} balance: ${balance.hbars}`) + + // Create the transaction + const transaction = await new AccountUpdateTransaction() + .setAccountId(accountId) + .setStakedNodeId(Templates.nodeNumberFromNodeId(nodeId) - 1) + .freezeWith(client) + + // Sign the transaction with the account's private key + const signTx = await transaction.sign(treasuryPrivateKey) + + // Submit the transaction to a Hedera network + const txResponse = await signTx.execute(client) + + // Request the receipt of the transaction + const receipt = await txResponse.getReceipt(client) + + // Get the transaction status + const transactionStatus = receipt.status + this.logger.debug(`The transaction consensus status is ${transactionStatus.toString()}`) + } catch (e) { + throw new FullstackTestingError(`Error in adding stake: ${e.message}`, e) + } + } + async checkNetworkNodePod (namespace, nodeId, maxAttempts = 60, delay = 2000) { nodeId = nodeId.trim() const podName = Templates.renderNetworkPodName(nodeId) @@ -206,7 +259,7 @@ export class NodeCommand extends BaseCommand { try { const output = await this.k8.execContainer(podName, constants.ROOT_CONTAINER, ['tail', '-100', logfilePath]) if (output && output.indexOf('Terminating Netty') < 0 && // make sure we are not at the beginning of a restart - (output.indexOf(`Now current platform status = ${status}`) > 0 || + (output.indexOf(`Now current platform status = ${status}`) > 0 || output.indexOf(`Platform Status Change ${status}`) > 0 || output.indexOf(`is ${status}`) > 0)) { // 'is ACTIVE' is for newer versions, first seen in v0.49.0 this.logger.debug(`Node ${nodeId} is ${status} [ attempt: ${attempt}/${maxAttempt}]`) @@ -445,7 +498,7 @@ export class NodeCommand extends BaseCommand { } } - uploadPlatformSoftware (ctx, task, localBuildPath) { + uploadPlatformSoftware (nodeIds, podNames, task, localBuildPath) { const self = this const subTasks = [] @@ -464,8 +517,8 @@ export class NodeCommand extends BaseCommand { } let localDataLibBuildPath - for (const nodeId of ctx.config.nodeIds) { - const podName = ctx.config.podNames[nodeId] + for (const nodeId of nodeIds) { + const podName = podNames[nodeId] if (buildPathMap.has(nodeId)) { localDataLibBuildPath = buildPathMap.get(nodeId) } else { @@ -497,16 +550,24 @@ export class NodeCommand extends BaseCommand { }) } - fetchPlatformSoftware (ctx, task, platformInstaller) { - const config = ctx.config + fetchLocalOrReleasedPlatformSoftware (nodeIds, podNames, releaseTag, task) { + const self = this + const localBuildPath = self.configManager.getFlag(flags.localBuildPath) + if (localBuildPath !== '') { + return self.uploadPlatformSoftware(nodeIds, podNames, task, localBuildPath) + } else { + return self.fetchPlatformSoftware(nodeIds, podNames, releaseTag, task, self.platformInstaller) + } + } + fetchPlatformSoftware (nodeIds, podNames, releaseTag, task, platformInstaller) { const subTasks = [] - for (const nodeId of ctx.config.nodeIds) { - const podName = ctx.config.podNames[nodeId] + for (const nodeId of nodeIds) { + const podName = podNames[nodeId] subTasks.push({ - title: `Update node: ${chalk.yellow(nodeId)}`, + title: `Update node: ${chalk.yellow(nodeId)} [ platformVersion = ${releaseTag} ]`, task: () => - platformInstaller.fetchPlatform(podName, config.releaseTag) + platformInstaller.fetchPlatform(podName, releaseTag) }) } @@ -519,6 +580,108 @@ 
export class NodeCommand extends BaseCommand { }) } + async prepareUpgradeZip (stagingDir) { + // we build a mock upgrade.zip file as we really don't need to upgrade the network + // also the platform zip file is ~80Mb in size requiring a lot of transactions since the max + // transaction size is 6Kb and in practice we need to send the file as 4Kb chunks. + // Note however that in DAB phase-2, we won't need to trigger this fake upgrade process + const zipper = new Zippy(this.logger) + const upgradeConfigDir = `${stagingDir}/mock-upgrade/data/config` + if (!fs.existsSync(upgradeConfigDir)) { + fs.mkdirSync(upgradeConfigDir, { recursive: true }) + } + + // bump field hedera.config.version + const fileBytes = fs.readFileSync(`${stagingDir}/templates/application.properties`) + const lines = fileBytes.toString().split('\n') + const newLines = [] + for (let line of lines) { + line = line.trim() + const parts = line.split('=') + if (parts.length === 2) { + if (parts[0] === 'hedera.config.version') { + let version = parseInt(parts[1]) + line = `hedera.config.version=${++version}` + } + newLines.push(line) + } + } + fs.writeFileSync(`${upgradeConfigDir}/application.properties`, newLines.join('\n')) + + return await zipper.zip(`${stagingDir}/mock-upgrade`, `${stagingDir}/mock-upgrade.zip`) + } + + async uploadUpgradeZip (upgradeZipFile, nodeClient) { + // get byte value of the zip file + const zipBytes = fs.readFileSync(upgradeZipFile) + const zipHash = crypto.createHash('sha384').update(zipBytes).digest('hex') + this.logger.debug(`loaded upgrade zip file [ zipHash = ${zipHash} zipBytes.length = ${zipBytes.length}, zipPath = ${upgradeZipFile}]`) + + // create a file upload transaction to upload file to the network + try { + let start = 0 + + while (start < zipBytes.length) { + const zipBytesChunk = new Uint8Array(zipBytes.subarray(start, constants.UPGRADE_FILE_CHUNK_SIZE)) + let fileTransaction = null + + if (start === 0) { + fileTransaction = new FileUpdateTransaction() + .setFileId(constants.UPGRADE_FILE_ID) + .setContents(zipBytesChunk) + } else { + fileTransaction = new FileAppendTransaction() + .setFileId(constants.UPGRADE_FILE_ID) + .setContents(zipBytesChunk) + } + const resp = await fileTransaction.execute(nodeClient) + const receipt = await resp.getReceipt(nodeClient) + this.logger.debug(`updated file ${constants.UPGRADE_FILE_ID} [chunkSize= ${zipBytesChunk.length}, txReceipt = ${receipt.toString()}]`) + + start += constants.UPGRADE_FILE_CHUNK_SIZE + } + + return zipHash + } catch (e) { + throw new FullstackTestingError(`failed to upload build.zip file: ${e.message}`, e) + } + } + + prepareEndpoints (endpointType, endpoints, defaultPort) { + const ret = /** @typedef ServiceEndpoint **/[] + for (const endpoint of endpoints) { + const parts = endpoint.split(':') + + let url = '' + let port = defaultPort + + if (parts.length === 2) { + url = parts[0].trim() + port = parts[1].trim() + } else if (parts.length === 1) { + url = parts[0] + } else { + throw new FullstackTestingError(`incorrect endpoint format. 
expected url:port, found ${endpoint}`) + } + + if (endpointType.toUpperCase() === constants.ENDPOINT_TYPE_IP) { + ret.push(new ServiceEndpoint({ + port, + ipAddressV4: helpers.parseIpAddressToUint8Array(url) + })) + } else { + ret.push(new ServiceEndpoint({ + port, + domainName: url + })) + } + } + + return ret + } + + // List of Commands + async setup (argv) { const self = this @@ -570,6 +733,7 @@ export class NodeCommand extends BaseCommand { * @property {Date} curDate * @property {string} keysDir * @property {string[]} nodeIds + * @property {Object} podNames * @property {string} releasePrefix * @property {string} stagingDir * @property {string} stagingKeysDir @@ -588,6 +752,7 @@ export class NodeCommand extends BaseCommand { 'curDate', 'keysDir', 'nodeIds', + 'podNames', 'releasePrefix', 'stagingDir', 'stagingKeysDir' @@ -665,7 +830,7 @@ export class NodeCommand extends BaseCommand { { title: 'Copy Gossip keys to staging', task: async (ctx, _) => { - await this.copyGossipKeysToStaging(ctx.config, ctx.config.nodeIds) + await this.copyGossipKeysToStaging(ctx.config.keyFormat, ctx.config.keysDir, ctx.config.stagingKeysDir, ctx.config.nodeIds) } }, { @@ -703,11 +868,8 @@ export class NodeCommand extends BaseCommand { title: 'Fetch platform software into network nodes', task: async (ctx, task) => { - if (ctx.config.localBuildPath !== '') { - return self.uploadPlatformSoftware(ctx, task, ctx.config.localBuildPath) - } else { - return self.fetchPlatformSoftware(ctx, task, self.platformInstaller) - } + const config = /** @type {NodeSetupConfigClass} **/ ctx.config + return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task) } }, { @@ -773,6 +935,7 @@ export class NodeCommand extends BaseCommand { ]) ctx.config = { + app: self.configManager.getFlag(flags.app), cacheDir: self.configManager.getFlag(flags.cacheDir), namespace: self.configManager.getFlag(flags.namespace), nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)) @@ -793,7 +956,7 @@ export class NodeCommand extends BaseCommand { title: 'Starting nodes', task: (ctx, task) => { const subTasks = [] - self.startNodes(ctx.config, ctx.config.nodeIds, subTasks) + self.startNodes(ctx.config.podNames, ctx.config.nodeIds, subTasks) // set up the sub-tasks return task.newListr(subTasks, { @@ -854,6 +1017,30 @@ export class NodeCommand extends BaseCommand { }) }, skip: (ctx, _) => self.configManager.getFlag(flags.app) !== '' + }, + { + title: 'Add node stakes', + task: (ctx, task) => { + if (ctx.config.app === '' || ctx.config.app === constants.HEDERA_APP_NAME) { + const subTasks = [] + const accountMap = getNodeAccountMap(ctx.config.nodeIds) + for (const nodeId of ctx.config.nodeIds) { + const accountId = accountMap.get(nodeId) + subTasks.push({ + title: `Adding stake for node: ${chalk.yellow(nodeId)}`, + task: () => self.addStake(ctx.config.namespace, accountId, nodeId) + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + } }], { concurrent: false, rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION @@ -1111,32 +1298,32 @@ export class NodeCommand extends BaseCommand { { title: 'Dump network nodes saved state', task: - async (ctx, task) => { - const subTasks = [] - for (const nodeId of ctx.config.nodeIds) { - const podName = ctx.config.podNames[nodeId] - subTasks.push({ - title: `Node: ${chalk.yellow(nodeId)}`, - task: async () => - await self.k8.execContainer(podName, 
constants.ROOT_CONTAINER, ['bash', '-c', `rm -rf ${constants.HEDERA_HAPI_PATH}/data/saved/*`]) - }) - } - - // set up the sub-tasks - return task.newListr(subTasks, { - concurrent: true, - rendererOptions: { - collapseSubtasks: false - } + async (ctx, task) => { + const subTasks = [] + for (const nodeId of ctx.config.nodeIds) { + const podName = ctx.config.podNames[nodeId] + subTasks.push({ + title: `Node: ${chalk.yellow(nodeId)}`, + task: async () => + await self.k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -rf ${constants.HEDERA_HAPI_PATH}/data/saved/*`]) }) } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: true, + rendererOptions: { + collapseSubtasks: false + } + }) + } }, { title: 'Fetch platform software into network nodes', task: - async (ctx, task) => { - return self.fetchPlatformSoftware(ctx, task, self.platformInstaller) - } + async (ctx, task) => { + return self.fetchLocalOrReleasedPlatformSoftware(ctx.config.nodeIds, ctx.config.podNames, ctx.config.releaseTag, task) + } }, { title: 'Setup network nodes', @@ -1180,7 +1367,7 @@ export class NodeCommand extends BaseCommand { title: 'Starting nodes', task: (ctx, task) => { const subTasks = [] - self.startNodes(ctx.config, ctx.config.nodeIds, subTasks) + self.startNodes(ctx.config.podNames, ctx.config.nodeIds, subTasks) // set up the sub-tasks return task.newListr(subTasks, { @@ -1313,8 +1500,11 @@ export class NodeCommand extends BaseCommand { flags.bootstrapProperties, flags.chartDirectory, flags.devMode, + flags.endpointType, flags.force, flags.fstChartVersion, + flags.gossipEndpoints, + flags.grpcEndpoints, flags.log4j2Xml, flags.settingTxt ]) @@ -1331,26 +1521,35 @@ export class NodeCommand extends BaseCommand { * @property {string} chainId * @property {string} chartDirectory * @property {boolean} devMode + * @property {string} endpointType * @property {boolean} force * @property {string} fstChartVersion * @property {boolean} generateGossipKeys * @property {boolean} generateTlsKeys + * @property {string} gossipEndpoints + * @property {string} grpcEndpoints * @property {string} keyFormat * @property {string} log4j2Xml * @property {string} namespace - * @property {string} nodeIDs + * @property {string} nodeId * @property {string} releaseTag * @property {string} settingTxt * -- extra args -- + * @property {PrivateKey} adminKey * @property {string[]} allNodeIds * @property {string} buildZipFile + * @property {string} chartPath * @property {Date} curDate * @property {string[]} existingNodeIds + * @property {string} freezeAdminPrivateKey * @property {string} keysDir + * @property {string} lastStateZipPath + * @property {Object} nodeClient * @property {string[]} nodeIds * @property {Object} podNames * @property {string} releasePrefix * @property {Map} serviceMap + * @property {PrivateKey} treasuryKey * @property {string} stagingDir * @property {string} stagingKeysDir * -- methods -- @@ -1364,22 +1563,32 @@ export class NodeCommand extends BaseCommand { // create a config object for subsequent steps const config = /** @type {NodeAddConfigClass} **/ this.getConfig(NodeCommand.ADD_CONFIGS_NAME, NodeCommand.ADD_FLAGS_LIST, [ + 'adminKey', 'allNodeIds', 'buildZipFile', + 'chartPath', 'curDate', 'existingNodeIds', + 'freezeAdminPrivateKey', 'keysDir', + 'lastStateZipPath', + 'nodeClient', 'nodeIds', 'podNames', 'releasePrefix', 'serviceMap', 'stagingDir', - 'stagingKeysDir' + 'stagingKeysDir', + 'treasuryKey' ]) - config.nodeIds = helpers.parseNodeIds(config.nodeIDs) config.curDate = 
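/*
 getNodeAccountMap is used in the 'Add node stakes' task above to pair each
 node id with its account id before staking; its implementation is not shown
 in this diff, so the following is a hypothetical reconstruction for
 illustration only, assuming node accounts are assigned consecutively from
 the default start account 0.0.3 (HEDERA_NODE_ACCOUNT_ID_START in
 constants.mjs).

   function nodeAccountMapSketch (nodeIds, realm = 0, shard = 0, firstNum = 3) {
     const accountMap = new Map()
     let num = firstNum
     for (const nodeId of nodeIds) {
       accountMap.set(nodeId, `${realm}.${shard}.${num++}`)
     }
     return accountMap
   }

   // nodeAccountMapSketch(['node1', 'node2']) -> Map { 'node1' => '0.0.3', 'node2' => '0.0.4' }
*/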
new Date() config.existingNodeIds = [] + config.nodeIds = [config.nodeId] + + if (config.keyFormat !== constants.KEY_FORMAT_PEM) { + throw new FullstackTestingError('key type cannot be PFX') + } await self.initializeSetup(config, self.configManager, self.k8) @@ -1390,7 +1599,14 @@ export class NodeCommand extends BaseCommand { constants.FULLSTACK_TESTING_CHART, constants.FULLSTACK_DEPLOYMENT_CHART) // initialize Node Client with existing network nodes prior to adding the new node which isn't functioning, yet - await this.accountManager.loadNodeClient(ctx.config.namespace) + ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace) + + const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace) + config.freezeAdminPrivateKey = accountKeys.privateKey + + const treasuryAccount = await this.accountManager.getTreasuryAccountKeys(config.namespace) + const treasuryAccountPrivateKey = treasuryAccount.privateKey + config.treasuryKey = PrivateKey.fromStringED25519(treasuryAccountPrivateKey) self.logger.debug('Initialized config', { config }) } @@ -1408,10 +1624,11 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Deploy new network node', - task: async (ctx, task) => { + title: 'Determine new node account number', + task: (ctx, task) => { const values = { hedera: { nodes: [] } } - let maxNum + let maxNum = 0 + for (/** @type {NetworkNodeServices} **/ const networkNodeServices of ctx.config.serviceMap.values()) { values.hedera.nodes.push({ accountId: networkNodeServices.accountId, @@ -1421,59 +1638,19 @@ export class NodeCommand extends BaseCommand { ? maxNum : AccountId.fromString(networkNodeServices.accountId).num } - for (const nodeId of ctx.config.nodeIds) { - const accountId = AccountId.fromString(values.hedera.nodes[0].accountId) - accountId.num = ++maxNum - values.hedera.nodes.push({ - accountId: accountId.toString(), - name: nodeId - }) - } - let valuesArg = '' - let index = 0 - for (const node of values.hedera.nodes) { - valuesArg += ` --set "hedera.nodes[${index}].accountId=${node.accountId}" --set "hedera.nodes[${index}].name=${node.name}"` - index++ + ctx.maxNum = maxNum + ctx.newNode = { + accountId: `${constants.HEDERA_NODE_ACCOUNT_ID_START.realm}.${constants.HEDERA_NODE_ACCOUNT_ID_START.shard}.${++maxNum}`, + name: ctx.config.nodeId } - - await self.chartManager.upgrade( - ctx.config.namespace, - constants.FULLSTACK_DEPLOYMENT_CHART, - ctx.config.chartPath, - valuesArg, - ctx.config.fstChartVersion - ) - ctx.config.allNodeIds = [...ctx.config.existingNodeIds, ...ctx.config.nodeIds] } }, { - title: 'Check new network node pod is running', - task: async (ctx, task) => { - const subTasks = [] - for (const nodeId of ctx.config.nodeIds) { - subTasks.push({ - title: `Check new network pod: ${chalk.yellow(nodeId)}`, - task: async (ctx) => { - ctx.config.podNames[nodeId] = await this.checkNetworkNodePod(ctx.config.namespace, nodeId) - } - }) - } - - // setup the sub-tasks - return task.newListr(subTasks, { - concurrent: true, - rendererOptions: { - collapseSubtasks: false - } - }) - } - }, - { - title: 'Generate Gossip keys', + title: 'Generate Gossip key', task: async (ctx, parentTask) => { const config = ctx.config - const subTasks = self._nodeGossipKeysTaskList(config.keyFormat, config.nodeIds, config.keysDir, config.curDate, config.allNodeIds) + const subTasks = self._nodeGossipKeysTaskList(config.keyFormat, [config.nodeId], config.keysDir, config.curDate, config.allNodeIds) // set up the 
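/*
 The 'Determine new node account number' task above scans the service map
 for the highest existing node account number and assigns the next one to
 the new node. The same calculation as a self-contained sketch, assuming
 @hashgraph/sdk; the input here is a plain list of account id strings:

   import { AccountId } from '@hashgraph/sdk'

   function nextNodeAccountId (existingAccountIds, realm = 0, shard = 0) {
     let maxNum = 0
     for (const id of existingAccountIds) {
       const num = Number(AccountId.fromString(id).num.toString()) // .num is a Long
       maxNum = num > maxNum ? num : maxNum
     }
     return `${realm}.${shard}.${maxNum + 1}`
   }

   // nextNodeAccountId(['0.0.3', '0.0.4', '0.0.5']) === '0.0.6'
*/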
sub-tasks return parentTask.newListr(subTasks, { concurrent: false, @@ -1486,10 +1663,10 @@ export class NodeCommand extends BaseCommand { skip: (ctx, _) => !ctx.config.generateGossipKeys }, { - title: 'Generate gRPC TLS keys', + title: 'Generate gRPC TLS key', task: async (ctx, parentTask) => { const config = ctx.config - const subTasks = self._nodeTlsKeyTaskList(config.nodeIds, config.keysDir, config.curDate) + const subTasks = self._nodeTlsKeyTaskList([config.nodeId], config.keysDir, config.curDate) // set up the sub-tasks return parentTask.newListr(subTasks, { concurrent: false, @@ -1501,10 +1678,212 @@ export class NodeCommand extends BaseCommand { }, skip: (ctx, _) => !ctx.config.generateTlsKeys }, + { + title: 'Load signing key certificate', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const signingCertFile = Templates.renderGossipPemPublicKeyFile(constants.SIGNING_KEY_PREFIX, config.nodeId) + const signingCertFullPath = `${config.keysDir}/${signingCertFile}` + const signingCertPem = fs.readFileSync(signingCertFullPath).toString() + const decodedDers = x509.PemConverter.decode(signingCertPem) + if (!decodedDers || decodedDers.length === 0) { + throw new FullstackTestingError('unable to decode public key: ' + signingCertFile) + } + ctx.signingCertDer = new Uint8Array(decodedDers[0]) + } + }, + { + title: 'Compute mTLS certificate hash', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const tlsCertFile = Templates.renderTLSPemPublicKeyFile(config.nodeId) + const tlsCertFullPath = `${config.keysDir}/${tlsCertFile}` + const tlsCertPem = fs.readFileSync(tlsCertFullPath).toString() + const tlsCertDers = x509.PemConverter.decode(tlsCertPem) + if (!tlsCertDers || tlsCertDers.length === 0) { + throw new FullstackTestingError('unable to decode tls cert: ' + tlsCertFullPath) + } + const tlsCertDer = new Uint8Array(tlsCertDers[0]) + ctx.tlsCertHash = crypto.createHash('sha384').update(tlsCertDer).digest() + } + }, + { + title: 'Prepare gossip endpoints', + task: (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + let endpoints = [] + if (!config.gossipEndpoints) { + if (config.endpointType !== constants.ENDPOINT_TYPE_FQDN) { + throw new FullstackTestingError(`--gossip-endpoints must be set if --endpoint-type is: ${constants.ENDPOINT_TYPE_IP}`) + } + + endpoints = [ + `${Templates.renderFullyQualifiedNetworkPodName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT}`, + `${Templates.renderFullyQualifiedNetworkSvcName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT}` + ] + } else { + endpoints = helpers.splitFlagInput(config.gossipEndpoints) + } + + ctx.gossipEndpoints = this.prepareEndpoints(config.endpointType, endpoints, constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT) + } + }, + { + title: 'Prepare grpc service endpoints', + task: (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + let endpoints = [] + + if (!config.grpcEndpoints) { + if (config.endpointType !== constants.ENDPOINT_TYPE_FQDN) { + throw new FullstackTestingError(`--grpc-endpoints must be set if --endpoint-type is: ${constants.ENDPOINT_TYPE_IP}`) + } + + endpoints = [ + `${Templates.renderFullyQualifiedNetworkSvcName(config.namespace, config.nodeId)}:${constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT}` + ] + } else { + endpoints = helpers.splitFlagInput(config.grpcEndpoints) + } + + ctx.grpcServiceEndpoints = 
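/*
 The 'Load signing key certificate' and 'Compute mTLS certificate hash'
 tasks above share one recipe: decode the PEM to DER, and (for mTLS) hash
 the DER with SHA-384, matching how ctx.tlsCertHash is later passed to
 NodeCreateTransaction.setCertificateHash. A compact sketch, assuming the
 x509 helper imported here is @peculiar/x509 (an assumption; only the
 import name is visible in this diff):

   import crypto from 'crypto'
   import fs from 'fs'
   import * as x509 from '@peculiar/x509'

   function certHashFromPem (pemPath) {
     const ders = x509.PemConverter.decode(fs.readFileSync(pemPath).toString())
     if (!ders || ders.length === 0) throw new Error(`no certificate found in ${pemPath}`)
     return crypto.createHash('sha384').update(new Uint8Array(ders[0])).digest()
   }
*/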
this.prepareEndpoints(config.endpointType, endpoints, constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT) + } + }, + { + title: 'Load node admin key', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + config.adminKey = PrivateKey.fromStringED25519(constants.GENESIS_KEY) + } + }, + { + title: 'Prepare upgrade zip file for node upgrade process', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + ctx.upgradeZipFile = await this.prepareUpgradeZip(config.stagingDir) + ctx.upgradeZipHash = await this.uploadUpgradeZip(ctx.upgradeZipFile, config.nodeClient) + } + }, + { + title: 'Check existing nodes staked amount', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + await sleep(60000) + const accountMap = getNodeAccountMap(config.existingNodeIds) + for (const nodeId of config.existingNodeIds) { + const accountId = accountMap.get(nodeId) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, + { + title: 'Send node create transaction', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + + try { + const nodeCreateTx = await new NodeCreateTransaction() + .setAccountId(ctx.newNode.accountId) + .setGossipEndpoints(ctx.gossipEndpoints) + .setServiceEndpoints(ctx.grpcServiceEndpoints) + .setGossipCaCertificate(ctx.signingCertDer) + .setCertificateHash(ctx.tlsCertHash) + .setAdminKey(config.adminKey.publicKey) + .freezeWith(config.nodeClient) + const signedTx = await nodeCreateTx.sign(config.adminKey) + const txResp = await signedTx.execute(config.nodeClient) + const nodeCreateReceipt = await txResp.getReceipt(config.nodeClient) + this.logger.debug(`NodeCreateReceipt: ${nodeCreateReceipt.toString()}`) + } catch (e) { + this.logger.error(`Error adding node to network: ${e.message}`, e) + throw new FullstackTestingError(`Error adding node to network: ${e.message}`, e) + } + } + }, + { + title: 'Send prepare upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + await this.prepareUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Download generated files from an existing node', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const node1FullyQualifiedPodName = Templates.renderNetworkPodName(config.existingNodeIds[0]) + + // copy the config.txt file from the node1 upgrade directory + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/config.txt`, config.stagingDir) + + const signedKeyFiles = (await self.k8.listDir(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current`)).filter(file => file.name.startsWith(constants.SIGNING_KEY_PREFIX)) + for (const signedKeyFile of signedKeyFiles) { + await self.k8.execContainer(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, ['bash', '-c', `[[ ! 
-f "${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name}" ]] || cp ${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name} ${constants.HEDERA_HAPI_PATH}/data/keys/${signedKeyFile.name}.old`]) + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/${signedKeyFile.name}`, `${config.keysDir}`) + } + } + }, + { + title: 'Send freeze upgrade transaction', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + await this.freezeUpgradeNetworkNodes(config.freezeAdminPrivateKey, ctx.upgradeZipHash, config.nodeClient) + } + }, + { + title: 'Check network nodes are frozen', + task: (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const subTasks = [] + for (const nodeId of config.existingNodeIds) { + subTasks.push({ + title: `Check node: ${chalk.yellow(nodeId)}`, + task: () => self.checkNetworkNodeState(nodeId, 100, 'FREEZE_COMPLETE') + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Deploy new network node', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const index = config.existingNodeIds.length + let valuesArg = '' + for (let i = 0; i < index; i++) { + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.serviceMap.get(config.existingNodeIds[i]).accountId}" --set "hedera.nodes[${i}].name=${config.existingNodeIds[i]}"` + } + valuesArg += ` --set "hedera.nodes[${index}].accountId=${ctx.newNode.accountId}" --set "hedera.nodes[${index}].name=${ctx.newNode.name}"` + + await self.chartManager.upgrade( + config.namespace, + constants.FULLSTACK_DEPLOYMENT_CHART, + config.chartPath, + valuesArg, + config.fstChartVersion + ) + + config.allNodeIds = [...config.existingNodeIds, config.nodeId] + } + }, + { + title: 'Check new network node pod is running', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + config.podNames[config.nodeId] = await this.checkNetworkNodePod(config.namespace, config.nodeId) + } + }, { title: 'Prepare staging directory', task: async (ctx, parentTask) => { - const config = ctx.config + const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = [ { title: 'Copy configuration files', @@ -1526,29 +1905,20 @@ export class NodeCommand extends BaseCommand { { title: 'Copy Gossip keys to staging', task: async (ctx, _) => { - const config = ctx.config + const config = /** @type {NodeAddConfigClass} **/ ctx.config - await this.copyGossipKeysToStaging(config, ctx.config.allNodeIds) + await this.copyGossipKeysToStaging(config.keyFormat, config.keysDir, config.stagingKeysDir, config.allNodeIds) } }, { title: 'Copy gRPC TLS keys to staging', task: async (ctx, _) => { - const config = ctx.config - for (const nodeId of ctx.config.allNodeIds) { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + for (const nodeId of config.allNodeIds) { const tlsKeyFiles = self.keyManager.prepareTLSKeyFilePaths(nodeId, config.keysDir) await self._copyNodeKeys(tlsKeyFiles, config.stagingKeysDir) } } - }, - { - title: 'Prepare config.txt for the network', - task: async (ctx, _) => { - const config = ctx.config - const configTxtPath = `${config.stagingDir}/config.txt` - const template = `${constants.RESOURCES_DIR}/templates/config.template` - await self.platformInstaller.prepareConfigTxt(config.allNodeIds, 
configTxtPath, config.releaseTag, config.chainId, template) - } } ] @@ -1559,43 +1929,44 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Fetch platform software into network nodes', + title: 'Fetch platform software into new network node', task: - async (ctx, task) => { - return self.fetchPlatformSoftware(ctx, task, self.platformInstaller) - } + async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + return self.fetchLocalOrReleasedPlatformSoftware(config.nodeIds, config.podNames, config.releaseTag, task) + } }, { - title: 'Freeze network nodes', - task: - async (ctx, task) => { - await this.freezeNetworkNodes(ctx.config) - } + title: 'Download last state from an existing node', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const node1FullyQualifiedPodName = Templates.renderNetworkPodName(config.existingNodeIds[0]) + const upgradeDirectory = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/0/123` + // zip the contents of the newest folder on node1 within /opt/hgcapp/services-hedera/HapiApp2.0/data/saved/com.hedera.services.ServicesMain/0/123/ + const zipFileName = await self.k8.execContainer(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, ['bash', '-c', `cd ${upgradeDirectory} && mapfile -t states < <(ls -1t .) && jar cf "\${states[0]}.zip" -C "\${states[0]}" . && echo -n \${states[0]}.zip`]) + await self.k8.copyFrom(node1FullyQualifiedPodName, constants.ROOT_CONTAINER, `${upgradeDirectory}/${zipFileName}`, config.stagingDir) + config.lastStateZipPath = `${config.stagingDir}/${zipFileName}` + } }, { - title: 'Check nodes are frozen', - task: (ctx, task) => { - const subTasks = [] - for (const nodeId of ctx.config.existingNodeIds) { - subTasks.push({ - title: `Check node: ${chalk.yellow(nodeId)}`, - task: () => self.checkNetworkNodeState(nodeId, 100, 'FREEZE_COMPLETE') - }) - } - - // set up the sub-tasks - return task.newListr(subTasks, { - concurrent: false, - rendererOptions: { - collapseSubtasks: false + title: 'Upload last saved state to new network node', + task: + async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + const newNodeFullyQualifiedPodName = Templates.renderNetworkPodName(config.nodeId) + const nodeNumber = Templates.nodeNumberFromNodeId(config.nodeId) + const savedStateDir = (config.lastStateZipPath.match(/\/(\d+)\.zip$/))[1] + const savedStatePath = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/${nodeNumber}/123/${savedStateDir}` + await self.k8.execContainer(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, ['bash', '-c', `mkdir -p ${savedStatePath}`]) + await self.k8.copyTo(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, config.lastStateZipPath, savedStatePath) + await self.platformInstaller.setPathPermission(newNodeFullyQualifiedPodName, constants.HEDERA_HAPI_PATH) + await self.k8.execContainer(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, ['bash', '-c', `cd ${savedStatePath} && jar xf ${path.basename(config.lastStateZipPath)} && rm -f ${path.basename(config.lastStateZipPath)}`]) } - }) - } }, { - title: 'Setup network nodes', + title: 'Setup new network node', task: async (ctx, parentTask) => { - const config = ctx.config + const config = /** @type {NodeAddConfigClass} **/ ctx.config // modify application.properties to trick Hedera Services into receiving an updated address book await 
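/*
 The 'Download last state' and 'Upload last saved state' tasks above zip
 the newest round directory on an existing node with jar (presumably
 because the node image ships a JDK but not zip/unzip — an inference, not
 stated in this diff) and recover the round number from the zip name on
 the way back:

   const lastStateZipPath = '/tmp/staging/123456789.zip' // hypothetical path
   const match = lastStateZipPath.match(/\/(\d+)\.zip$/)
   if (!match) throw new Error(`unexpected state zip name: ${lastStateZipPath}`)
   const savedStateDir = match[1] // '123456789', the round number

 Note the unchecked index in the task above ((...match)[1]) throws a bare
 TypeError rather than a descriptive error if the name ever changes shape.
*/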
self.bumpHederaConfigVersion(`${config.stagingDir}/templates/application.properties`) @@ -1618,10 +1989,11 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Starting nodes', + title: 'Start network nodes', task: (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = [] - self.startNodes(ctx.config, ctx.config.allNodeIds, subTasks) + self.startNodes(config.podNames, config.allNodeIds, subTasks) // set up the sub-tasks return task.newListr(subTasks, { @@ -1634,9 +2006,11 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Check nodes are ACTIVE', - task: (ctx, task) => { + title: 'Check all nodes are ACTIVE', + task: async (ctx, task) => { const subTasks = [] + // sleep for 30 seconds to give time for the logs to roll over to prevent capturing an invalid "ACTIVE" string + await sleep(30000) for (const nodeId of ctx.config.allNodeIds) { subTasks.push({ title: `Check node: ${chalk.yellow(nodeId)}`, @@ -1654,12 +2028,13 @@ export class NodeCommand extends BaseCommand { } }, { - title: 'Check node proxies are ACTIVE', + title: 'Check all node proxies are ACTIVE', // this is more reliable than checking the nodes logs for ACTIVE, as the // logs will have a lot of white noise from being behind task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config const subTasks = [] - for (const nodeId of ctx.config.nodeIds) { + for (const nodeId of config.allNodeIds) { subTasks.push({ title: `Check proxy for node: ${chalk.yellow(nodeId)}`, task: async () => await self.k8.waitForPodReady( @@ -1677,6 +2052,28 @@ export class NodeCommand extends BaseCommand { }) } }, + { + title: 'Stake new node', + task: (ctx, _) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + self.addStake(config.namespace, ctx.newNode.accountId, config.nodeId) + } + }, + { + title: 'Trigger stake weight calculate', + task: async (ctx, task) => { + const config = /** @type {NodeAddConfigClass} **/ ctx.config + // sleep 60 seconds for the handler to be able to trigger the network node stake weight recalculate + await sleep(60000) + const accountMap = getNodeAccountMap(config.allNodeIds) + // send some write transactions to invoke the handler that will trigger the stake weight recalculate + for (const nodeId of config.allNodeIds) { + const accountId = accountMap.get(nodeId) + config.nodeClient.setOperator(TREASURY_ACCOUNT_ID, config.treasuryKey) + await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, accountId, 1) + } + } + }, { title: 'Finalize', task: (ctx, _) => { @@ -1694,6 +2091,7 @@ export class NodeCommand extends BaseCommand { try { await tasks.run() } catch (e) { + self.logger.error(`Error in setting up nodes: ${e.message}`, e) throw new FullstackTestingError(`Error in setting up nodes: ${e.message}`, e) } finally { await self.close() @@ -1702,10 +2100,7 @@ export class NodeCommand extends BaseCommand { return true } - async freezeNetworkNodes (config) { - await this.accountManager.loadNodeClient(config.namespace) - const client = this.accountManager._nodeClient - + async prepareUpgradeNetworkNodes (freezeAdminPrivateKey, upgradeZipHash, client) { try { // transfer some tiny amount to the freeze admin account await this.accountManager.transferAmount(constants.TREASURY_ACCOUNT_ID, FREEZE_ADMIN_ACCOUNT, 100000) @@ -1717,46 +2112,45 @@ export class NodeCommand extends BaseCommand { this.logger.debug(`Freeze admin account balance: ${balance.hbars}`) // set operator of freeze transaction as freeze 
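/*
 prepareUpgradeNetworkNodes (above) and freezeUpgradeNetworkNodes (below)
 form a two-step dance: PrepareUpgrade pins the upgrade file id and hash,
 then FreezeUpgrade schedules the freeze itself a few seconds out. A
 condensed sketch, assuming @hashgraph/sdk and a client whose operator is
 already the freeze admin account:

   import { FreezeTransaction, FreezeType, Timestamp } from '@hashgraph/sdk'

   async function freezeForUpgrade (client, fileId, fileHash, delayMs = 5000) {
     await (await new FreezeTransaction()
       .setFreezeType(FreezeType.PrepareUpgrade)
       .setFileId(fileId)
       .setFileHash(fileHash)
       .freezeWith(client)
       .execute(client)).getReceipt(client)

     await (await new FreezeTransaction()
       .setFreezeType(FreezeType.FreezeUpgrade)
       .setStartTimestamp(Timestamp.fromDate(new Date(Date.now() + delayMs)))
       .setFileId(fileId)
       .setFileHash(fileHash)
       .freezeWith(client)
       .execute(client)).getReceipt(client)
   }
*/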
admin account - const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace) - const freezeAdminPrivateKey = accountKeys.privateKey client.setOperator(FREEZE_ADMIN_ACCOUNT, freezeAdminPrivateKey) - // fetch special file - const fileId = FileId.fromString('0.0.150') - const fileQuery = new FileContentsQuery().setFileId(fileId) - const addressBookBytes = await fileQuery.execute(client) - const fileHash = crypto.createHash('sha384').update(addressBookBytes).digest('hex') - const prepareUpgradeTx = await new FreezeTransaction() .setFreezeType(FreezeType.PrepareUpgrade) - .setFileId(fileId) - .setFileHash(fileHash) + .setFileId(constants.UPGRADE_FILE_ID) + .setFileHash(upgradeZipHash) .freezeWith(client) .execute(client) const prepareUpgradeReceipt = await prepareUpgradeTx.getReceipt(client) this.logger.debug( - `Upgrade prepared with transaction id: ${prepareUpgradeTx.transactionId.toString()}`, + `sent prepare upgrade transaction [id: ${prepareUpgradeTx.transactionId.toString()}]`, prepareUpgradeReceipt.status.toString() ) + } catch (e) { + this.logger.error(`Error in prepare upgrade: ${e.message}`, e) + throw new FullstackTestingError(`Error in prepare upgrade: ${e.message}`, e) + } + } + async freezeUpgradeNetworkNodes (freezeAdminPrivateKey, upgradeZipHash, client) { + try { const futureDate = new Date() this.logger.debug(`Current time: ${futureDate}`) - futureDate.setTime(futureDate.getTime() + 20000) // 20 seconds in the future + futureDate.setTime(futureDate.getTime() + 5000) // 5 seconds in the future this.logger.debug(`Freeze time: ${futureDate}`) + client.setOperator(FREEZE_ADMIN_ACCOUNT, freezeAdminPrivateKey) const freezeUpgradeTx = await new FreezeTransaction() .setFreezeType(FreezeType.FreezeUpgrade) .setStartTimestamp(Timestamp.fromDate(futureDate)) - .setFileId(fileId) - .setFileHash(fileHash) + .setFileId(constants.UPGRADE_FILE_ID) + .setFileHash(upgradeZipHash) .freezeWith(client) .execute(client) const freezeUpgradeReceipt = await freezeUpgradeTx.getReceipt(client) - this.logger.debug(`Upgrade frozen with transaction id: ${freezeUpgradeTx.transactionId.toString()}`, freezeUpgradeReceipt.status.toString()) } catch (e) { @@ -1765,46 +2159,46 @@ export class NodeCommand extends BaseCommand { } } - startNodes (config, nodeIds, subTasks) { + startNodes (podNames, nodeIds, subTasks) { for (const nodeId of nodeIds) { - const podName = config.podNames[nodeId] + const podName = podNames[nodeId] subTasks.push({ title: `Start node: ${chalk.yellow(nodeId)}`, task: async () => { - await this.k8.execContainer(podName, constants.ROOT_CONTAINER, ['bash', '-c', `rm -rf ${constants.HEDERA_HAPI_PATH}/output/*`]) await this.k8.execContainer(podName, constants.ROOT_CONTAINER, ['systemctl', 'restart', 'network-node']) } }) } } - async copyGossipKeysToStaging (config, nodeIds) { + async copyGossipKeysToStaging (keyFormat, keysDir, stagingKeysDir, nodeIds) { // copy gossip keys to the staging for (const nodeId of nodeIds) { - switch (config.keyFormat) { + switch (keyFormat) { case constants.KEY_FORMAT_PEM: { - const signingKeyFiles = this.keyManager.prepareNodeKeyFilePaths(nodeId, config.keysDir, constants.SIGNING_KEY_PREFIX) - await this._copyNodeKeys(signingKeyFiles, config.stagingKeysDir) + const signingKeyFiles = this.keyManager.prepareNodeKeyFilePaths(nodeId, keysDir, constants.SIGNING_KEY_PREFIX) + await this._copyNodeKeys(signingKeyFiles, stagingKeysDir) // generate missing agreement keys - const agreementKeyFiles = 
this.keyManager.prepareNodeKeyFilePaths(nodeId, config.keysDir, constants.AGREEMENT_KEY_PREFIX) - await this._copyNodeKeys(agreementKeyFiles, config.stagingKeysDir) + const agreementKeyFiles = this.keyManager.prepareNodeKeyFilePaths(nodeId, keysDir, constants.AGREEMENT_KEY_PREFIX) + await this._copyNodeKeys(agreementKeyFiles, stagingKeysDir) break } case constants.KEY_FORMAT_PFX: { const privateKeyFile = Templates.renderGossipPfxPrivateKeyFile(nodeId) - fs.cpSync(`${config.keysDir}/${privateKeyFile}`, `${config.stagingKeysDir}/${privateKeyFile}`) - fs.cpSync(`${config.keysDir}/${constants.PUBLIC_PFX}`, `${config.stagingKeysDir}/${constants.PUBLIC_PFX}`) + fs.cpSync(`${keysDir}/${privateKeyFile}`, `${stagingKeysDir}/${privateKeyFile}`) + fs.cpSync(`${keysDir}/${constants.PUBLIC_PFX}`, `${stagingKeysDir}/${constants.PUBLIC_PFX}`) break } default: - throw new FullstackTestingError(`Unsupported key-format ${config.keyFormat}`) + throw new FullstackTestingError(`Unsupported key-format ${keyFormat}`) } } } + // Command Definition /** * Return Yargs command definition for 'node' command * @param nodeCmd an instance of NodeCommand @@ -1839,6 +2233,7 @@ export class NodeCommand extends BaseCommand { command: 'start', desc: 'Start a node', builder: y => flags.setCommandFlags(y, + flags.app, flags.namespace, flags.nodeIDs ), diff --git a/src/commands/prompts.mjs b/src/commands/prompts.mjs index bbc6ade13..81c8f4a8e 100644 --- a/src/commands/prompts.mjs +++ b/src/commands/prompts.mjs @@ -412,6 +412,38 @@ export async function promptAmount (task, input) { flags.amount.name) } +export async function promptNewNodeId (task, input) { + return await promptText(task, input, + flags.nodeID.definition.defaultValue, + 'Enter the new node id: ', + null, + flags.nodeID.name) +} + +export async function promptGossipEndpoints (task, input) { + return await promptText(task, input, + flags.gossipEndpoints.definition.defaultValue, + 'Enter the gossip endpoints(comma separated): ', + null, + flags.gossipEndpoints.name) +} + +export async function promptGrpcEndpoints (task, input) { + return await promptText(task, input, + flags.grpcEndpoints.definition.defaultValue, + 'Enter the gRPC endpoints(comma separated): ', + null, + flags.grpcEndpoints.name) +} + +export async function promptEndpointType (task, input) { + return await promptText(task, input, + flags.endpointType.definition.defaultValue, + 'Enter the endpoint type(IP or FQDN): ', + null, + flags.endpointType.name) +} + export function getPromptMap () { return new Map() .set(flags.accountId.name, promptAccountId) @@ -449,6 +481,10 @@ export function getPromptMap () { .set(flags.tlsClusterIssuerType.name, promptTlsClusterIssuerType) .set(flags.updateAccountKeys.name, promptUpdateAccountKeys) .set(flags.valuesFile.name, promptValuesFile) + .set(flags.nodeID.name, promptNewNodeId) + .set(flags.gossipEndpoints.name, promptGossipEndpoints) + .set(flags.grpcEndpoints.name, promptGrpcEndpoints) + .set(flags.endpointType.name, promptEndpointType) } // build the prompt registry diff --git a/src/core/account_manager.mjs b/src/core/account_manager.mjs index b2aabccc6..f464f1704 100644 --- a/src/core/account_manager.mjs +++ b/src/core/account_manager.mjs @@ -79,7 +79,12 @@ export class AccountManager { publicKey: Base64.decode(secret.data.publicKey) } } else { - return null + // if it isn't in the secrets we can load genesis key + return { + accountId, + privateKey: constants.GENESIS_KEY, + publicKey: PrivateKey.fromStringED25519(constants.GENESIS_KEY).publicKey.toString() + 
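/*
 The genesis-key fallback in getAccountKeysFromSecret (below) only stores
 the private key string; the public key is derived on the fly with
 PrivateKey.fromStringED25519. The same derivation in isolation, using a
 freshly generated key so the snippet is runnable without the real
 genesis key:

   import { PrivateKey } from '@hashgraph/sdk'

   const privateKey = PrivateKey.generateED25519()
   const publicKey = privateKey.publicKey.toString() // derived, never stored separately
   const roundTrip = PrivateKey.fromStringED25519(privateKey.toString())
   console.log(roundTrip.publicKey.toString() === publicKey) // true
*/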
} } } @@ -92,18 +97,7 @@ export class AccountManager { */ async getTreasuryAccountKeys (namespace) { // check to see if the treasure account is in the secrets - let accountInfo = await this.getAccountKeysFromSecret(constants.TREASURY_ACCOUNT_ID, namespace) - - // if it isn't in the secrets we can load genesis key - if (!accountInfo) { - accountInfo = { - accountId: constants.TREASURY_ACCOUNT_ID, - privateKey: constants.GENESIS_KEY, - publicKey: PrivateKey.fromStringED25519(constants.GENESIS_KEY).publicKey.toString() - } - } - - return accountInfo + return await this.getAccountKeysFromSecret(constants.TREASURY_ACCOUNT_ID, namespace) } /** @@ -167,6 +161,8 @@ export class AccountManager { this._nodeClient = await this._getNodeClient(namespace, networkNodeServicesMap, treasuryAccountInfo.accountId, treasuryAccountInfo.privateKey) } + + return this._nodeClient } /** @@ -254,8 +250,7 @@ export class AccountManager { async getNodeServiceMap (namespace) { const labelSelector = 'fullstack.hedera.com/node-name' - /** @type {Map} **/ - const serviceBuilderMap = new Map() + const serviceBuilderMap = /** @type {Map} **/ new Map() const serviceList = await this.k8.kubeClient.listNamespacedService( namespace, undefined, undefined, undefined, undefined, labelSelector) @@ -478,11 +473,10 @@ export class AccountManager { async getAccountKeys (accountId) { const accountInfo = await this.accountInfoQuery(accountId) - let keys + let keys = [] if (accountInfo.key instanceof KeyList) { keys = accountInfo.key.toArray() } else { - keys = [] keys.push(accountInfo.key) } @@ -629,6 +623,7 @@ export class AccountManager { // ensure serviceEndpoint.ipAddressV4 value for all nodes in the addressBook is a 4 bytes array instead of string // See: https://github.com/hashgraph/hedera-protobufs/blob/main/services/basic_types.proto#L1309 + // TODO: with v0.53 will mirror node no longer need this and we can remove @hashgraph/proto? const addressBook = HashgraphProto.proto.NodeAddressBook.decode(addressBookBytes) const hasAlphaRegEx = /[a-zA-Z]+/ let modified = false diff --git a/src/core/constants.mjs b/src/core/constants.mjs index 29f5fd074..10f213cba 100644 --- a/src/core/constants.mjs +++ b/src/core/constants.mjs @@ -14,7 +14,7 @@ * limitations under the License. 
* */ -import { AccountId } from '@hashgraph/sdk' +import { AccountId, FileId } from '@hashgraph/sdk' import { color, PRESET_TIMER } from 'listr2' import { dirname, normalize } from 'path' import { fileURLToPath } from 'url' @@ -47,7 +47,7 @@ export const HEDERA_BUILDS_URL = 'https://builds.hedera.com' export const HEDERA_NODE_ACCOUNT_ID_START = AccountId.fromString(process.env.SOLO_NODE_ACCOUNT_ID_START || '0.0.3') export const HEDERA_NODE_INTERNAL_GOSSIP_PORT = process.env.SOLO_NODE_INTERNAL_GOSSIP_PORT || '50111' export const HEDERA_NODE_EXTERNAL_GOSSIP_PORT = process.env.SOLO_NODE_EXTERNAL_GOSSIP_PORT || '50111' -export const HEDERA_NODE_DEFAULT_STAKE_AMOUNT = process.env.SOLO_NODE_DEFAULT_STAKE_AMOUNT || 1 +export const HEDERA_NODE_DEFAULT_STAKE_AMOUNT = process.env.SOLO_NODE_DEFAULT_STAKE_AMOUNT || 500 // --------------- Charts related constants ---------------------------------------------------------------------------- export const FULLSTACK_SETUP_NAMESPACE = 'fullstack-setup' @@ -68,8 +68,9 @@ export const DEFAULT_CHART_REPO = new Map() export const OPERATOR_ID = process.env.SOLO_OPERATOR_ID || '0.0.2' export const OPERATOR_KEY = process.env.SOLO_OPERATOR_KEY || '302e020100300506032b65700422042091132178e72057a1d7528025956fe39b0b847f200ab59b2fdd367017f3087137' export const OPERATOR_PUBLIC_KEY = process.env.SOLO_OPERATOR_PUBLIC_KEY || '302a300506032b65700321000aa8e21064c61eab86e2a9c164565b4e7a9a4146106e0a6cd03a8c395a110e92' -export const FREEZE_ADMIN_ACCOUNT = process.env.FREEZE_ADMIN_ACCOUNT || '0.0.58' +export const FREEZE_ADMIN_ACCOUNT = process.env.FREEZE_ADMIN_ACCOUNT || `${HEDERA_NODE_ACCOUNT_ID_START.realm}.${HEDERA_NODE_ACCOUNT_ID_START.shard}.58` export const TREASURY_ACCOUNT_ID = `${HEDERA_NODE_ACCOUNT_ID_START.realm}.${HEDERA_NODE_ACCOUNT_ID_START.shard}.2` +export const COUNCIL_ACCOUNT_ID = `${HEDERA_NODE_ACCOUNT_ID_START.realm}.${HEDERA_NODE_ACCOUNT_ID_START.shard}.55` export const GENESIS_KEY = process.env.GENESIS_KEY || '302e020100300506032b65700422042091132178e72057a1d7528025956fe39b0b847f200ab59b2fdd367017f3087137' export const SYSTEM_ACCOUNTS = [[3, 100], [200, 349], [400, 750], [900, 1000]] // do account 0.0.2 last and outside the loop export const SHORTER_SYSTEM_ACCOUNTS = [[3, 60]] @@ -138,3 +139,12 @@ export const NODE_CLIENT_MAX_ATTEMPTS = process.env.NODE_CLIENT_MAX_ATTEMPTS || export const NODE_CLIENT_MIN_BACKOFF = process.env.NODE_CLIENT_MIN_BACKOFF || 1000 export const NODE_CLIENT_MAX_BACKOFF = process.env.NODE_CLIENT_MAX_BACKOFF || 1000 export const NODE_CLIENT_REQUEST_TIMEOUT = process.env.NODE_CLIENT_REQUEST_TIMEOUT || 120000 + +// ---- New Node Related ---- +export const ENDPOINT_TYPE_IP = 'IP' +export const ENDPOINT_TYPE_FQDN = 'FQDN' + +// file-id must be between 0.0.150 and 0.0.159 +// file must be uploaded using FileUpdateTransaction in maximum of 5Kb chunks +export const UPGRADE_FILE_ID = FileId.fromString('0.0.150') +export const UPGRADE_FILE_CHUNK_SIZE = 1024 * 5 // 5Kb diff --git a/src/core/helpers.mjs b/src/core/helpers.mjs index f16011b51..772c42f6e 100644 --- a/src/core/helpers.mjs +++ b/src/core/helpers.mjs @@ -37,19 +37,23 @@ export function sleep (ms) { } export function parseNodeIds (input) { + return splitFlagInput(input, ',') +} + +export function splitFlagInput (input, separator = ',') { if (typeof input === 'string') { - const nodeIds = [] - input.split(',').forEach(item => { - const nodeId = item.trim() - if (nodeId) { - nodeIds.push(nodeId) + const items = [] + input.split(separator).forEach(s => { + const item = s.trim() + if (s) { 
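/*
 splitFlagInput above generalizes parseNodeIds to any separator-delimited
 flag. Note the guard tests the raw segment `s` rather than the trimmed
 `item`, so a whitespace-only segment still gets pushed as an empty string.
 A compact equivalent that filters on the trimmed value avoids that:

   const splitFlagInputStrict = (input, separator = ',') =>
     input.split(separator).map(s => s.trim()).filter(item => item)

   // splitFlagInputStrict('node1, node2,, ,node3') -> ['node1', 'node2', 'node3']
*/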
+ items.push(item) } }) - return nodeIds + return items } - throw new FullstackTestingError('node IDs is not a comma separated string') + throw new FullstackTestingError('input is not a comma separated string') } export function cloneArray (arr) { @@ -145,7 +149,7 @@ export function backupOldTlsKeys (nodeIds, keysDir, curDate = new Date(), dirPre const fileMap = new Map() for (const nodeId of nodeIds) { const srcPath = path.join(keysDir, Templates.renderTLSPemPrivateKeyFile(nodeId)) - const destPath = path.join(backupDir, Templates.renderTLSPemPublicKeyFile(nodeId)) + const destPath = path.join(backupDir, Templates.renderTLSPemPrivateKeyFile(nodeId)) fileMap.set(srcPath, destPath) } @@ -159,7 +163,7 @@ export function backupOldPemKeys (nodeIds, keysDir, curDate = new Date(), dirPre const fileMap = new Map() for (const nodeId of nodeIds) { const srcPath = path.join(keysDir, Templates.renderGossipPemPrivateKeyFile(nodeId)) - const destPath = path.join(backupDir, Templates.renderGossipPemPublicKeyFile(nodeId)) + const destPath = path.join(backupDir, Templates.renderGossipPemPrivateKeyFile(nodeId)) fileMap.set(srcPath, destPath) } @@ -194,25 +198,30 @@ export function validatePath (input) { * @returns {Promise} A promise that resolves when the logs are downloaded */ export async function getNodeLogs (k8, namespace) { + k8.logger.debug('getNodeLogs: begin...') const pods = await k8.getPodsByLabel(['fullstack.hedera.com/type=network-node']) + const timeString = new Date().toISOString().replace(/:/g, '-').replace(/\./g, '-') + for (const pod of pods) { const podName = pod.metadata.name - const targetDir = `${SOLO_LOGS_DIR}/${namespace}/${podName}` + const targetDir = path.join(SOLO_LOGS_DIR, namespace, timeString) try { - if (fs.existsSync(targetDir)) { - fs.rmdirSync(targetDir, { recursive: true }) + if (!fs.existsSync(targetDir)) { + fs.mkdirSync(targetDir, { recursive: true }) } - fs.mkdirSync(targetDir, { recursive: true }) - await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/output/swirlds.log`, targetDir) - await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/output/hgcaa.log`, targetDir) - await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, targetDir) - await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/settings.txt`, targetDir) + const scriptName = 'support-zip.sh' + const sourcePath = path.join(constants.RESOURCES_DIR, scriptName) // script source path + await k8.copyTo(podName, ROOT_CONTAINER, sourcePath, `${HEDERA_HAPI_PATH}`) + await k8.execContainer(podName, ROOT_CONTAINER, `chmod 0755 ${HEDERA_HAPI_PATH}/${scriptName}`) + await k8.execContainer(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${scriptName}`) + await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${podName}.zip`, targetDir) } catch (e) { // not throw error here, so we can continue to finish downloading logs from other pods // and also delete namespace in the end k8.logger.error(`failed to download logs from pod ${podName}`, e) } + k8.logger.debug('getNodeLogs: ...end') } } @@ -242,3 +251,14 @@ export function getEnvValue (envVarArray, name) { const kvPair = envVarArray.find(v => v.startsWith(`${name}=`)) return kvPair ? 
kvPair.split('=')[1] : null } + +export function parseIpAddressToUint8Array (ipAddress) { + const parts = ipAddress.split('.') + const uint8Array = new Uint8Array(4) + + for (let i = 0; i < 4; i++) { + uint8Array[i] = parseInt(parts[i], 10) + } + + return uint8Array +} diff --git a/src/core/k8.mjs b/src/core/k8.mjs index 026c28b13..2da966a6c 100644 --- a/src/core/k8.mjs +++ b/src/core/k8.mjs @@ -331,7 +331,7 @@ export class K8 { * @param containerName container name * @param destPath path inside the container * @param timeout timeout in ms - * @return {Promise<{}>} + * @return {Promise<[]>} */ async listDir (podName, containerName, destPath, timeout = 5000) { try { @@ -344,8 +344,13 @@ export class K8 { for (let line of lines) { line = line.replace(/\s+/g, '|') const parts = line.split('|') - if (parts.length === 9) { - const name = parts[parts.length - 1] + if (parts.length >= 9) { + let name = parts[parts.length - 1] + // handle unique file format (without single quotes): 'usedAddressBook_vHederaSoftwareVersion{hapiVersion=v0.53.0, servicesVersion=v0.53.0}_2024-07-30-20-39-06_node_0.txt.debug' + for (let i = parts.length - 1; i > 8; i--) { + name = `${parts[i - 1]} ${name}` + } + if (name !== '.' && name !== '..') { const permission = parts[0] const item = { @@ -424,7 +429,7 @@ export class K8 { return await this.execContainer( podName, containerName, - ['bash', '-c', '[[ -d "' + destPath + '" ]] && echo -n "true" || echo -n "false"'] + ['sh', '-c', '[[ -d "' + destPath + '" ]] && echo -n "true" || echo -n "false"'] ) === 'true' } @@ -432,7 +437,7 @@ export class K8 { return this.execContainer( podName, containerName, - ['bash', '-c', 'mkdir -p "' + destPath + '"'] + ['sh', '-c', 'mkdir -p "' + destPath + '"'] ) } @@ -595,11 +600,11 @@ export class K8 { } /** - * Invoke bash command within a container and return the console output as string + * Invoke sh command within a container and return the console output as string * * @param podName pod name * @param containerName container name - * @param command bash commands as an array to be run within the containerName (e.g 'ls -la /opt/hgcapp') + * @param command sh commands as an array to be run within the containerName (e.g 'ls -la /opt/hgcapp') * @param timeoutMs timout in milliseconds * @returns {Promise} console output as string */ diff --git a/src/core/key_manager.mjs b/src/core/key_manager.mjs index 8eb927f75..c3381db5b 100644 --- a/src/core/key_manager.mjs +++ b/src/core/key_manager.mjs @@ -193,8 +193,7 @@ export class KeyManager { }) self.logger.debug(`Stored ${keyName} key for node: ${nodeId}`, { - nodeKeyFiles, - cert: certPems[0] + nodeKeyFiles }) resolve(nodeKeyFiles) diff --git a/src/core/templates.mjs b/src/core/templates.mjs index c7f8e20c7..b587fc768 100644 --- a/src/core/templates.mjs +++ b/src/core/templates.mjs @@ -180,4 +180,12 @@ export class Templates { const parts = svcName.split('.') return this.nodeIdFromNetworkSvcName(parts[0]) } + + static nodeNumberFromNodeId (nodeId) { + for (let i = nodeId.length - 1; i > 0; i--) { + if (isNaN(nodeId[i])) { + return parseInt(nodeId.substring(i + 1, nodeId.length)) + } + } + } } diff --git a/test/e2e/commands/account.test.mjs b/test/e2e/commands/account.test.mjs index 20b92fdd4..3845e0f70 100644 --- a/test/e2e/commands/account.test.mjs +++ b/test/e2e/commands/account.test.mjs @@ -56,11 +56,12 @@ describe('AccountCommand', () => { // set the env variable SOLO_FST_CHARTS_DIR if developer wants to use local FST charts argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR 
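/*
 parseIpAddressToUint8Array above trusts its input: a malformed octet
 parses to NaN, and assigning NaN into a Uint8Array silently stores 0. A
 defensive variant (illustrative, not part of this diff) for contexts where
 the address comes straight from user flags:

   function parseIpAddressStrict (ipAddress) {
     const parts = ipAddress.split('.')
     if (parts.length !== 4) throw new Error(`invalid IPv4 address: ${ipAddress}`)
     return Uint8Array.from(parts.map(part => {
       const n = Number(part)
       if (!Number.isInteger(n) || n < 0 || n > 255) throw new Error(`invalid octet '${part}' in ${ipAddress}`)
       return n
     }))
   }
*/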
? process.env.SOLO_FST_CHARTS_DIR : undefined const bootstrapResp = bootstrapNetwork(testName, argv) + const accountCmd = new AccountCommand(bootstrapResp.opts, testSystemAccounts) + bootstrapResp.cmd.accountCmd = accountCmd const k8 = bootstrapResp.opts.k8 const accountManager = bootstrapResp.opts.accountManager const configManager = bootstrapResp.opts.configManager const nodeCmd = bootstrapResp.cmd.nodeCmd - const accountCmd = new AccountCommand(bootstrapResp.opts, testSystemAccounts) afterAll(async () => { await getNodeLogs(k8, namespace) @@ -106,6 +107,7 @@ describe('AccountCommand', () => { nodeCmd.logger.info(`Fetching account keys: accountId ${accountId}`) const keys = await accountManager.getAccountKeys(accountId) nodeCmd.logger.info(`Fetched account keys: accountId ${accountId}`) + expect(keys.length).not.toEqual(0) expect(keys[0].toString()).not.toEqual(genesisKey.toString()) }, 20000) } diff --git a/test/e2e/commands/cluster.test.mjs b/test/e2e/commands/cluster.test.mjs index 41b4d0939..0ca46ad54 100644 --- a/test/e2e/commands/cluster.test.mjs +++ b/test/e2e/commands/cluster.test.mjs @@ -66,6 +66,7 @@ describe('ClusterCommand', () => { const clusterCmd = bootstrapResp.cmd.clusterCmd afterAll(async () => { + await chartManager.isChartInstalled(constants.FULLSTACK_SETUP_NAMESPACE, constants.FULLSTACK_CLUSTER_SETUP_CHART) await getNodeLogs(k8, namespace) await k8.deleteNamespace(namespace) await accountManager.close() diff --git a/test/e2e/commands/mirror_node.test.mjs b/test/e2e/commands/mirror_node.test.mjs index 53711ce70..f017eb708 100644 --- a/test/e2e/commands/mirror_node.test.mjs +++ b/test/e2e/commands/mirror_node.test.mjs @@ -29,6 +29,7 @@ import { constants } from '../../../src/core/index.mjs' import { + accountCreationShouldSucceed, balanceQueryShouldSucceed, bootstrapNetwork, getDefaultArgv, @@ -160,6 +161,10 @@ describe('MirrorNodeCommand', () => { } }, 60000) + // trigger some extra transactions to trigger MirrorNode to fetch the transactions + accountCreationShouldSucceed(accountManager, mirrorNodeCmd, namespace) + accountCreationShouldSucceed(accountManager, mirrorNodeCmd, namespace) + it('Check submit message result should success', async () => { expect.assertions(1) try { diff --git a/test/e2e/commands/node-add.test.mjs b/test/e2e/commands/node-add.test.mjs new file mode 100644 index 000000000..1c7715be2 --- /dev/null +++ b/test/e2e/commands/node-add.test.mjs @@ -0,0 +1,100 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @jest-environment steps + */ +import { afterAll, beforeAll, describe, expect, it } from '@jest/globals' +import { flags } from '../../../src/commands/index.mjs' +import { constants } from '../../../src/core/index.mjs' +import { + accountCreationShouldSucceed, + balanceQueryShouldSucceed, + bootstrapNetwork, + getDefaultArgv, getNodeIdsPrivateKeysHash, getTestConfigManager, getTmpDir, + HEDERA_PLATFORM_VERSION_TAG +} from '../../test_util.js' +import { getNodeLogs } from '../../../src/core/helpers.mjs' +import { NodeCommand } from '../../../src/commands/node.mjs' + +describe('Node add', () => { + const defaultTimeout = 120000 + const namespace = 'node-add' + const nodeId = 'node4' + const argv = getDefaultArgv() + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + argv[flags.nodeIDs.name] = 'node1,node2,node3' + argv[flags.nodeID.name] = nodeId + argv[flags.generateGossipKeys.name] = true + argv[flags.generateTlsKeys.name] = true + argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM + // set the env variable SOLO_FST_CHARTS_DIR if developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ? process.env.SOLO_FST_CHARTS_DIR : undefined + argv[flags.releaseTag.name] = HEDERA_PLATFORM_VERSION_TAG + argv[flags.namespace.name] = namespace + const bootstrapResp = bootstrapNetwork(namespace, argv) + const nodeCmd = bootstrapResp.cmd.nodeCmd + const accountCmd = bootstrapResp.cmd.accountCmd + const k8 = bootstrapResp.opts.k8 + let existingServiceMap + let existingNodeIdsPrivateKeysHash + + beforeAll(async () => { + const configManager = getTestConfigManager(`${namespace}-solo.config`) + configManager.update(argv, true) + existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace) + existingNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) + }, defaultTimeout) + + afterAll(async () => { + await getNodeLogs(k8, namespace) + await k8.deleteNamespace(namespace) + }, 600000) + + it('should succeed with init command', async () => { + const status = await accountCmd.init(argv) + expect(status).toBeTruthy() + }, 450000) + + it('should add a new node to the network successfully', async () => { + await nodeCmd.add(argv) + expect(nodeCmd.getUnusedConfigs(NodeCommand.ADD_CONFIGS_NAME)).toEqual([ + flags.apiPermissionProperties.constName, + flags.applicationProperties.constName, + flags.bootstrapProperties.constName, + flags.chainId.constName, + flags.devMode.constName, + flags.log4j2Xml.constName, + flags.settingTxt.constName + ]) + await nodeCmd.accountManager.close() + }, 600000) + + balanceQueryShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + + accountCreationShouldSucceed(nodeCmd.accountManager, nodeCmd, namespace) + + it('existing nodes private keys should not have changed', async () => { + const currentNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, constants.KEY_FORMAT_PEM, k8, getTmpDir()) + + for (const [nodeId, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { + const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeId) + + for (const [keyFileName, existingKeyHash] of existingKeyHashMap.entries()) { + expect(`${nodeId}:${keyFileName}:${currentNodeKeyHashMap.get(keyFileName)}`).toEqual( + `${nodeId}:${keyFileName}:${existingKeyHash}`) + } + } + }, defaultTimeout) +}) diff --git a/test/e2e/commands/node-local-hedera.test.mjs 
b/test/e2e/commands/node-local-hedera.test.mjs index dbb6f5ef6..3e805a9d4 100644 --- a/test/e2e/commands/node-local-hedera.test.mjs +++ b/test/e2e/commands/node-local-hedera.test.mjs @@ -44,7 +44,7 @@ describe('Node local build', () => { afterAll(async () => { await getNodeLogs(hederaK8, LOCAL_HEDERA) await hederaK8.deleteNamespace(LOCAL_HEDERA) - }, 120000) + }, 600000) describe('Node for hedera app should start successfully', () => { console.log('Starting local build for Hedera app') diff --git a/test/e2e/commands/node-local-ptt.test.mjs b/test/e2e/commands/node-local-ptt.test.mjs index 2ef952b0d..ec47b9795 100644 --- a/test/e2e/commands/node-local-ptt.test.mjs +++ b/test/e2e/commands/node-local-ptt.test.mjs @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. * + * @jest-environment steps */ import { afterAll, diff --git a/test/e2e/commands/node_pem_stop_add.test.mjs b/test/e2e/commands/node_pem_stop.test.mjs similarity index 84% rename from test/e2e/commands/node_pem_stop_add.test.mjs rename to test/e2e/commands/node_pem_stop.test.mjs index d65d65d6a..055317bed 100644 --- a/test/e2e/commands/node_pem_stop_add.test.mjs +++ b/test/e2e/commands/node_pem_stop.test.mjs @@ -18,8 +18,8 @@ import { describe } from '@jest/globals' import { constants } from '../../../src/core/index.mjs' -import { e2eNodeKeyRefreshAddTest } from '../e2e_node_util.js' +import { e2eNodeKeyRefreshTest } from '../e2e_node_util.js' describe('NodeCommand', () => { - e2eNodeKeyRefreshAddTest(constants.KEY_FORMAT_PEM, 'node-cmd-e2e-pem', 'stop') + e2eNodeKeyRefreshTest(constants.KEY_FORMAT_PEM, 'node-cmd-e2e-pem', 'stop') }) diff --git a/test/e2e/commands/node_pfx_kill_add.test.mjs b/test/e2e/commands/node_pfx_kill.test.mjs similarity index 84% rename from test/e2e/commands/node_pfx_kill_add.test.mjs rename to test/e2e/commands/node_pfx_kill.test.mjs index 912b63521..f16f4b94a 100644 --- a/test/e2e/commands/node_pfx_kill_add.test.mjs +++ b/test/e2e/commands/node_pfx_kill.test.mjs @@ -18,8 +18,8 @@ import { describe } from '@jest/globals' import { constants } from '../../../src/core/index.mjs' -import { e2eNodeKeyRefreshAddTest } from '../e2e_node_util.js' +import { e2eNodeKeyRefreshTest } from '../e2e_node_util.js' describe('NodeCommand', () => { - e2eNodeKeyRefreshAddTest(constants.KEY_FORMAT_PFX, 'node-cmd-e2e-pfx', 'kill') + e2eNodeKeyRefreshTest(constants.KEY_FORMAT_PFX, 'node-cmd-e2e-pfx', 'kill') }) diff --git a/test/e2e/core/account_manager.test.mjs b/test/e2e/core/account_manager.test.mjs index 406c21e97..d7c79ce60 100644 --- a/test/e2e/core/account_manager.test.mjs +++ b/test/e2e/core/account_manager.test.mjs @@ -14,19 +14,33 @@ * limitations under the License. 
* */ -import { describe, expect, it } from '@jest/globals' -import path from 'path' +import { afterAll, describe, expect, it } from '@jest/globals' import { flags } from '../../../src/commands/index.mjs' -import { AccountManager } from '../../../src/core/account_manager.mjs' -import { ConfigManager, constants, K8 } from '../../../src/core/index.mjs' -import { getTestCacheDir, testLogger } from '../../test_util.js' +import { + bootstrapNetwork, + getDefaultArgv, + TEST_CLUSTER +} from '../../test_util.js' +import * as version from '../../../version.mjs' describe('AccountManager', () => { - const configManager = new ConfigManager(testLogger, path.join(getTestCacheDir('accountCmd'), 'solo.config')) - configManager.setFlag(flags.namespace, 'solo-e2e') + const namespace = 'account-mngr-e2e' + const argv = getDefaultArgv() + argv[flags.namespace.name] = namespace + argv[flags.nodeIDs.name] = 'node0' + argv[flags.clusterName.name] = TEST_CLUSTER + argv[flags.fstChartVersion.name] = version.FST_CHART_VERSION + // set the env variable SOLO_FST_CHARTS_DIR if developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ? process.env.SOLO_FST_CHARTS_DIR : undefined + const bootstrapResp = bootstrapNetwork(namespace, argv, undefined, undefined, undefined, undefined, undefined, undefined, false) + const k8 = bootstrapResp.opts.k8 + const accountManager = bootstrapResp.opts.accountManager + const configManager = bootstrapResp.opts.configManager - const k8 = new K8(configManager, testLogger) - const accountManager = new AccountManager(testLogger, k8, constants) + afterAll(async () => { + await k8.deleteNamespace(namespace) + await accountManager.close() + }, 180000) it('should be able to stop port forwards', async () => { expect.assertions(4) diff --git a/test/e2e/core/chart_manager.test.mjs b/test/e2e/core/chart_manager.test.mjs index 5761ccc92..29e33eb8e 100644 --- a/test/e2e/core/chart_manager.test.mjs +++ b/test/e2e/core/chart_manager.test.mjs @@ -23,9 +23,11 @@ describe('ChartManager', () => { const helm = new Helm(testLogger) const chartManager = new ChartManager(helm, testLogger) const configManager = new ConfigManager(testLogger) + const argv = [] beforeAll(() => { - configManager.load() + argv[flags.namespace.name] = constants.FULLSTACK_SETUP_NAMESPACE + configManager.update(argv) }) it('should be able to list installed charts', async () => { @@ -38,7 +40,7 @@ describe('ChartManager', () => { it('should be able to check if a chart is installed', async () => { const ns = configManager.getFlag(flags.namespace) expect(ns).not.toBeNull() - const isInstalled = await chartManager.isChartInstalled(ns, constants.FULLSTACK_DEPLOYMENT_CHART) + const isInstalled = await chartManager.isChartInstalled(ns, constants.FULLSTACK_CLUSTER_SETUP_CHART) expect(isInstalled).toBeTruthy() }) }) diff --git a/test/e2e/core/k8_e2e.test.mjs b/test/e2e/core/k8_e2e.test.mjs index 981f6f614..c85428116 100644 --- a/test/e2e/core/k8_e2e.test.mjs +++ b/test/e2e/core/k8_e2e.test.mjs @@ -14,7 +14,7 @@ * limitations under the License. 
* */ -import { beforeAll, describe, expect, it } from '@jest/globals' +import { afterAll, beforeAll, describe, expect, it } from '@jest/globals' import fs from 'fs' import net from 'net' import os from 'os' @@ -23,6 +23,19 @@ import { v4 as uuid4 } from 'uuid' import { FullstackTestingError } from '../../../src/core/errors.mjs' import { ConfigManager, constants, logging, Templates } from '../../../src/core/index.mjs' import { K8 } from '../../../src/core/k8.mjs' +import { flags } from '../../../src/commands/index.mjs' +import { + V1Container, + V1ObjectMeta, + V1PersistentVolumeClaim, + V1PersistentVolumeClaimSpec, + V1Pod, + V1PodSpec, + V1Service, + V1ServicePort, + V1ServiceSpec, + V1VolumeResourceRequirements +} from '@kubernetes/client-node' const defaultTimeout = 20000 @@ -30,9 +43,61 @@ describe('K8', () => { const testLogger = logging.NewLogger('debug', true) const configManager = new ConfigManager(testLogger) const k8 = new K8(configManager, testLogger) + const testNamespace = 'k8-e2e' + const argv = [] + const podName = 'test-pod' + const containerName = 'alpine' + const podLabel = 'app=test' + const serviceName = 'test-service' + + beforeAll(async () => { + try { + argv[flags.namespace.name] = testNamespace + configManager.update(argv) + await k8.createNamespace(testNamespace) + const v1Pod = new V1Pod() + const v1Metadata = new V1ObjectMeta() + v1Metadata.name = podName + v1Metadata.namespace = testNamespace + v1Metadata.labels = { app: 'test' } + v1Pod.metadata = v1Metadata + const v1Container = new V1Container() + v1Container.name = containerName + v1Container.image = 'alpine:latest' + v1Container.command = ['/bin/sh', '-c', 'sleep 7200'] + const v1Spec = new V1PodSpec() + v1Spec.containers = [v1Container] + v1Pod.spec = v1Spec + await k8.kubeClient.createNamespacedPod(testNamespace, v1Pod) + const v1Svc = new V1Service() + const v1SvcMetadata = new V1ObjectMeta() + v1SvcMetadata.name = serviceName + v1SvcMetadata.namespace = testNamespace + v1SvcMetadata.labels = { app: 'svc-test' } + v1Svc.metadata = v1SvcMetadata + const v1SvcSpec = new V1ServiceSpec() + const v1SvcPort = new V1ServicePort() + v1SvcPort.port = 80 + v1SvcPort.targetPort = 80 + v1SvcSpec.ports = [v1SvcPort] + v1Svc.spec = v1SvcSpec + await k8.kubeClient.createNamespacedService(testNamespace, v1Svc) + } catch (e) { + console.log(e) + throw e + } + }, defaultTimeout) - beforeAll(() => { - configManager.load() + afterAll(async () => { + try { + await k8.kubeClient.deleteNamespacedPod(podName, testNamespace, undefined, undefined, 1) + await k8.deleteNamespace(testNamespace) + argv[flags.namespace.name] = constants.FULLSTACK_SETUP_NAMESPACE + configManager.update(argv) + } catch (e) { + console.log(e) + throw e + } }, defaultTimeout) it('should be able to list clusters', async () => { @@ -57,29 +122,54 @@ describe('K8', () => { await expect(k8.deleteNamespace(name)).resolves.toBeTruthy() }, defaultTimeout) + it('should be able to run wait for pod', async () => { + const labels = [podLabel] + + const pods = await k8.waitForPods([constants.POD_PHASE_RUNNING], labels, 1) + expect(pods.length).toStrictEqual(1) + }, defaultTimeout) + + it('should be able to run wait for pod ready', async () => { + const labels = [podLabel] + + const pods = await k8.waitForPodReady(labels, 1) + expect(pods.length).toStrictEqual(1) + }, defaultTimeout) + + it('should be able to run wait for pod conditions', async () => { + const labels = [podLabel] + + const conditions = new Map() + .set(constants.POD_CONDITION_INITIALIZED, 
constants.POD_CONDITION_STATUS_TRUE) + .set(constants.POD_CONDITION_POD_SCHEDULED, constants.POD_CONDITION_STATUS_TRUE) + .set(constants.POD_CONDITION_READY, constants.POD_CONDITION_STATUS_TRUE) + const pods = await k8.waitForPodConditions(conditions, labels, 1) + expect(pods.length).toStrictEqual(1) + }, defaultTimeout) + it('should be able to detect pod IP of a pod', async () => { - const podName = Templates.renderNetworkPodName('node0') + const pods = await k8.getPodsByLabel([podLabel]) + const podName = pods[0].metadata.name await expect(k8.getPodIP(podName)).resolves.not.toBeNull() await expect(k8.getPodIP('INVALID')).rejects.toThrow(FullstackTestingError) }, defaultTimeout) it('should be able to detect cluster IP', async () => { - const svcName = Templates.renderNetworkSvcName('node0') - await expect(k8.getClusterIP(svcName)).resolves.not.toBeNull() + await expect(k8.getClusterIP(serviceName)).resolves.not.toBeNull() await expect(k8.getClusterIP('INVALID')).rejects.toThrow(FullstackTestingError) }, defaultTimeout) it('should be able to check if a path is directory inside a container', async () => { - const podName = Templates.renderNetworkPodName('node0') - await expect(k8.hasDir(podName, constants.ROOT_CONTAINER, constants.HEDERA_USER_HOME_DIR)).resolves.toBeTruthy() + const pods = await k8.getPodsByLabel([podLabel]) + const podName = pods[0].metadata.name + await expect(k8.hasDir(podName, containerName, '/tmp')).resolves.toBeTruthy() }, defaultTimeout) it('should be able to copy a file to and from a container', async () => { - const podName = Templates.renderNetworkPodName('node0') - const containerName = constants.ROOT_CONTAINER - + const pods = await k8.waitForPodReady([podLabel], 1, 20) + expect(pods.length).toStrictEqual(1) const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'k8-')) - const destDir = constants.HEDERA_USER_HOME_DIR + const destDir = '/tmp' const srcPath = 'test/data/pem/keys/a-private-node0.pem' const destPath = `${destDir}/a-private-node0.pem` @@ -122,58 +212,37 @@ describe('K8', () => { testLogger.showUserError(e) expect(e).toBeNull() } + // TODO: enhance this test to exercise the forwarded port; the pod isn't even running, yet the test still passes }, defaultTimeout) - it('should be able to run wait for pod', async () => { - const labels = [ - 'fullstack.hedera.com/type=network-node' - ] - - const pods = await k8.waitForPods([constants.POD_PHASE_RUNNING], labels, 1) - expect(pods.length).toStrictEqual(1) - }, defaultTimeout) - - it('should be able to run wait for pod ready', async () => { - const labels = [ - 'fullstack.hedera.com/type=network-node' - ] - - const pods = await k8.waitForPodReady(labels, 1) - expect(pods.length).toStrictEqual(1) - }, defaultTimeout) - - it('should be able to run wait for pod conditions', async () => { - const labels = [ - 'fullstack.hedera.com/type=network-node' - ] - - const conditions = new Map() - .set(constants.POD_CONDITION_INITIALIZED, constants.POD_CONDITION_STATUS_TRUE) - .set(constants.POD_CONDITION_POD_SCHEDULED, constants.POD_CONDITION_STATUS_TRUE) - .set(constants.POD_CONDITION_READY, constants.POD_CONDITION_STATUS_TRUE) - const pods = await k8.waitForPodConditions(conditions, labels, 1) - expect(pods.length).toStrictEqual(1) - }, defaultTimeout) - - it('should be able to cat a log file inside the container', async () => { - const podName = Templates.renderNetworkPodName('node0') - const containerName = constants.ROOT_CONTAINER - const testFileName = 'test.txt' - const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(),
'k8-')) - const tmpFile = path.join(tmpDir, testFileName) - const destDir = constants.HEDERA_USER_HOME_DIR - const destPath = `${destDir}/${testFileName}` - fs.writeFileSync(tmpFile, 'TEST\nNow current platform status = ACTIVE') - - await expect(k8.copyTo(podName, containerName, tmpFile, destDir)).resolves.toBeTruthy() - const output = await k8.execContainer(podName, containerName, ['tail', '-10', destPath]) - expect(output.indexOf('Now current platform status = ACTIVE')).toBeGreaterThan(0) - - fs.rmdirSync(tmpDir, { recursive: true }) + it('should be able to cat a file inside the container', async () => { + const pods = await k8.getPodsByLabel([podLabel]) + const podName = pods[0].metadata.name + const output = await k8.execContainer(podName, containerName, ['cat', '/etc/hostname']) + expect(output.indexOf(podName)).toEqual(0) }, defaultTimeout) it('should be able to list persistent volume claims', async () => { - const pvcs = await k8.listPvcsByNamespace(k8._getNamespace()) - expect(pvcs.length).toBeGreaterThan(0) + let response + try { + const v1Pvc = new V1PersistentVolumeClaim() + const v1Spec = new V1PersistentVolumeClaimSpec() + v1Spec.accessModes = ['ReadWriteOnce'] + const v1ResReq = new V1VolumeResourceRequirements() + v1ResReq.requests = { storage: '50Mi' } + v1Spec.resources = v1ResReq + v1Pvc.spec = v1Spec + const v1Metadata = new V1ObjectMeta() + v1Metadata.name = 'test-pvc' + v1Pvc.metadata = v1Metadata + response = await k8.kubeClient.createNamespacedPersistentVolumeClaim(testNamespace, v1Pvc) + console.log(response) + const pvcs = await k8.listPvcsByNamespace(testNamespace) + expect(pvcs.length).toBeGreaterThan(0) + } catch (e) { + console.error(e) + throw e + } }, defaultTimeout) }) diff --git a/test/e2e/core/platform_installer_e2e.test.mjs b/test/e2e/core/platform_installer_e2e.test.mjs index 97458a9c8..adc502573 100644 --- a/test/e2e/core/platform_installer_e2e.test.mjs +++ b/test/e2e/core/platform_installer_e2e.test.mjs @@ -14,30 +14,48 @@ * limitations under the License. * */ -import { beforeAll, describe, expect, it } from '@jest/globals' +import { afterAll, beforeAll, describe, expect, it } from '@jest/globals' import { - PlatformInstaller, constants, - Templates, - ConfigManager, Templates as Template + Templates } from '../../../src/core/index.mjs' import * as fs from 'fs' -import { K8 } from '../../../src/core/k8.mjs' -import { getTestCacheDir, getTmpDir, testLogger } from '../../test_util.js' -import { AccountManager } from '../../../src/core/account_manager.mjs' +import { + bootstrapNetwork, + getDefaultArgv, + getTestCacheDir, + getTmpDir, TEST_CLUSTER, + testLogger +} from '../../test_util.js' +import { flags } from '../../../src/commands/index.mjs' +import * as version from '../../../version.mjs' const defaultTimeout = 20000 describe('PackageInstallerE2E', () => { - const configManager = new ConfigManager(testLogger) - const k8 = new K8(configManager, testLogger) - const accountManager = new AccountManager(testLogger, k8) - const installer = new PlatformInstaller(testLogger, k8, configManager, accountManager) + const namespace = 'pkg-installer-e2e' + const argv = getDefaultArgv() + argv[flags.namespace.name] = namespace + argv[flags.nodeIDs.name] = 'node0' + argv[flags.clusterName.name] = TEST_CLUSTER + argv[flags.fstChartVersion.name] = version.FST_CHART_VERSION + // set the env variable SOLO_FST_CHARTS_DIR if the developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ?
process.env.SOLO_FST_CHARTS_DIR : undefined + const bootstrapResp = bootstrapNetwork(namespace, argv, undefined, undefined, undefined, undefined, undefined, undefined, false) + const k8 = bootstrapResp.opts.k8 + const accountManager = bootstrapResp.opts.accountManager + const configManager = bootstrapResp.opts.configManager + const installer = bootstrapResp.opts.platformInstaller const testCacheDir = getTestCacheDir() const podName = 'network-node0-0' const packageVersion = 'v0.42.5' + afterAll(async () => { + await k8.deleteNamespace(namespace) + await accountManager.close() + }, 180000) + beforeAll(async () => { if (!fs.existsSync(testCacheDir)) { fs.mkdirSync(testCacheDir) @@ -93,7 +111,7 @@ describe('PackageInstallerE2E', () => { expect(configLines.length).toBe(4) expect(configLines[0]).toBe(`swirld, ${chainId}`) expect(configLines[1]).toBe(`app, ${constants.HEDERA_APP_NAME}`) - expect(configLines[2]).toContain('address, 0, node0, node0, 1') + expect(configLines[2]).toContain(`address, 0, node0, node0, ${constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT}`) expect(configLines[3]).toBe('nextNodeId, 1') // verify the file exists @@ -144,7 +162,7 @@ describe('PackageInstallerE2E', () => { describe('copyTLSKeys', () => { it('should succeed to copy TLS keys for node0', async () => { const nodeId = 'node0' - const podName = Template.renderNetworkPodName(nodeId) + const podName = Templates.renderNetworkPodName(nodeId) const tmpDir = getTmpDir() // create mock files diff --git a/test/e2e/e2e_node_util.js b/test/e2e/e2e_node_util.js index ee39884dd..44104bf77 100644 --- a/test/e2e/e2e_node_util.js +++ b/test/e2e/e2e_node_util.js @@ -16,12 +16,6 @@ * @jest-environment steps */ -import { - AccountCreateTransaction, - Hbar, - HbarUnit, - PrivateKey -} from '@hashgraph/sdk' import { afterAll, afterEach, @@ -32,298 +26,172 @@ import { } from '@jest/globals' import { flags } from '../../src/commands/index.mjs' import { - constants, Templates -} from '../../src/core/index.mjs' -import { + accountCreationShouldSucceed, balanceQueryShouldSucceed, bootstrapNetwork, getDefaultArgv, getTestConfigManager, - getTmpDir, HEDERA_PLATFORM_VERSION_TAG, TEST_CLUSTER } from '../test_util.js' import { getNodeLogs, sleep } from '../../src/core/helpers.mjs' -import path from 'path' -import fs from 'fs' -import crypto from 'crypto' -import { ROOT_CONTAINER, SHORTER_SYSTEM_ACCOUNTS } from '../../src/core/constants.mjs' import { NodeCommand } from '../../src/commands/node.mjs' -import { AccountCommand } from '../../src/commands/account.mjs' -export function e2eNodeKeyRefreshAddTest (keyFormat, testName, mode, releaseTag = HEDERA_PLATFORM_VERSION_TAG) { +export function e2eNodeKeyRefreshTest (keyFormat, testName, mode, releaseTag = HEDERA_PLATFORM_VERSION_TAG) { const defaultTimeout = 120000 - describe(`NodeCommand [testName ${testName}, mode ${mode}, keyFormat: ${keyFormat}, release ${releaseTag}]`, () => { - const namespace = testName - const argv = getDefaultArgv() - argv[flags.namespace.name] = namespace - argv[flags.releaseTag.name] = releaseTag - argv[flags.keyFormat.name] = keyFormat - argv[flags.nodeIDs.name] = 'node0,node1,node2,node3' - argv[flags.generateGossipKeys.name] = true - argv[flags.generateTlsKeys.name] = true - argv[flags.clusterName.name] = TEST_CLUSTER - argv[flags.devMode.name] = true - // set the env variable SOLO_FST_CHARTS_DIR if developer wants to use local FST charts - argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR ? 
process.env.SOLO_FST_CHARTS_DIR : undefined - - const bootstrapResp = bootstrapNetwork(testName, argv) - const accountManager = bootstrapResp.opts.accountManager - const k8 = bootstrapResp.opts.k8 - const nodeCmd = bootstrapResp.cmd.nodeCmd - const accountCmd = new AccountCommand(bootstrapResp.opts, SHORTER_SYSTEM_ACCOUNTS) - - afterEach(async () => { - await nodeCmd.close() - await accountManager.close() - }, defaultTimeout) - - afterAll(async () => { - await getNodeLogs(k8, namespace) - await k8.deleteNamespace(namespace) - }, 180000) - - describe(`Node should have started successfully [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}]`, () => { - balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) - - accountCreationShouldSucceed(accountManager, nodeCmd, namespace) - - it(`Node Proxy should be UP [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}`, async () => { - expect.assertions(1) - - try { - await expect(k8.waitForPodReady( - ['app=haproxy-node0', 'fullstack.hedera.com/type=haproxy'], - 1, 300, 1000)).resolves.toBeTruthy() - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } finally { + describe( + `NodeCommand [testName ${testName}, mode ${mode}, keyFormat: ${keyFormat}, release ${releaseTag}]`, + () => { + const namespace = testName + const argv = getDefaultArgv() + argv[flags.namespace.name] = namespace + argv[flags.releaseTag.name] = releaseTag + argv[flags.keyFormat.name] = keyFormat + argv[flags.nodeIDs.name] = 'node0,node1,node2' + argv[flags.generateGossipKeys.name] = true + argv[flags.generateTlsKeys.name] = true + argv[flags.clusterName.name] = TEST_CLUSTER + argv[flags.devMode.name] = true + // set the env variable SOLO_FST_CHARTS_DIR if the developer wants to use local FST charts + argv[flags.chartDirectory.name] = process.env.SOLO_FST_CHARTS_DIR + ?
process.env.SOLO_FST_CHARTS_DIR + : undefined + + const bootstrapResp = bootstrapNetwork(testName, argv) + const accountManager = bootstrapResp.opts.accountManager + const k8 = bootstrapResp.opts.k8 + const nodeCmd = bootstrapResp.cmd.nodeCmd + + afterEach(async () => { await nodeCmd.close() + await accountManager.close() + }, defaultTimeout) + + afterAll(async () => { + await getNodeLogs(k8, namespace) + await k8.deleteNamespace(namespace) + }, 600000) + + describe( + `Node should have started successfully [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}]`, + () => { + balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) + + accountCreationShouldSucceed(accountManager, nodeCmd, namespace) + + it(`Node Proxy should be UP [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}`, + async () => { + expect.assertions(1) + + try { + await expect(k8.waitForPodReady( + ['app=haproxy-node0', + 'fullstack.hedera.com/type=haproxy'], + 1, 300, 1000)).resolves.toBeTruthy() + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() + } finally { + await nodeCmd.close() + } + }, defaultTimeout) + }) + + describe( + `Node should refresh successfully [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}]`, + () => { + const nodeId = 'node0' + + beforeAll(async () => { + const podName = await nodeRefreshTestSetup(argv, testName, k8, + nodeId) + if (mode === 'kill') { + const resp = await k8.kubeClient.deleteNamespacedPod(podName, + namespace) + expect(resp.response.statusCode).toEqual(200) + await sleep(20000) // sleep to wait for pod to finish terminating + } else if (mode === 'stop') { + await expect(nodeCmd.stop(argv)).resolves.toBeTruthy() + await sleep(20000) // give time for node to stop and update its logs + } else { + throw new Error(`invalid mode: ${mode}`) + } + }, 120000) + + nodePodShouldBeRunning(nodeCmd, namespace, nodeId) + + nodeShouldNotBeActive(nodeCmd, nodeId) + + nodeRefreshShouldSucceed(nodeId, nodeCmd, argv) + + balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) + + accountCreationShouldSucceed(accountManager, nodeCmd, namespace) + }) + + function nodePodShouldBeRunning (nodeCmd, namespace, nodeId) { + it(`${nodeId} should be running`, async () => { + try { + await expect(nodeCmd.checkNetworkNodePod(namespace, + nodeId)).resolves.toBeTruthy() + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() + } finally { + await nodeCmd.close() + } + }, defaultTimeout) } - }, defaultTimeout) - }) - describe(`Node should refresh successfully [mode ${mode}, release ${releaseTag}, keyFormat: ${keyFormat}]`, () => { - const nodeId = 'node0' - - beforeAll(async () => { - const podName = await nodeRefreshTestSetup(argv, testName, k8, nodeId) - if (mode === 'kill') { - const resp = await k8.kubeClient.deleteNamespacedPod(podName, namespace) - expect(resp.response.statusCode).toEqual(200) - await sleep(20000) // sleep to wait for pod to finish terminating - } else if (mode === 'stop') { - await expect(nodeCmd.stop(argv)).resolves.toBeTruthy() - await sleep(20000) // give time for node to stop and update its logs - } else { - throw new Error(`invalid mode: ${mode}`) + function nodeRefreshShouldSucceed (nodeId, nodeCmd, argv) { + it(`${nodeId} refresh should succeed`, async () => { + try { + await expect(nodeCmd.refresh(argv)).resolves.toBeTruthy() + expect(nodeCmd.getUnusedConfigs( + NodeCommand.REFRESH_CONFIGS_NAME)).toEqual( + [flags.devMode.constName]) + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() 
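+                // a caught error here means the refresh failed; asserting that
+                // e is null fails the test and attaches the error for debugging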
+ } finally { + await nodeCmd.close() + await sleep(10000) // sleep to wait for node to finish starting + } + }, 1200000) } - }, 120000) - - nodePodShouldBeRunning(nodeCmd, namespace, nodeId) - - nodeShouldNotBeActive(nodeCmd, nodeId) - - nodeRefreshShouldSucceed(nodeId, nodeCmd, argv) - - balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) - - accountCreationShouldSucceed(accountManager, nodeCmd, namespace) - }) - - describe(`Should add a new node to the network [release ${releaseTag}, keyFormat: ${keyFormat}]`, () => { - const nodeId = 'node4' - let existingServiceMap - let existingNodeIdsPrivateKeysHash - - beforeAll(async () => { - argv[flags.nodeIDs.name] = nodeId - const configManager = getTestConfigManager(`${testName}-solo.config`) - configManager.update(argv, true) - existingServiceMap = await accountManager.getNodeServiceMap(namespace) - existingNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, keyFormat, k8, getTmpDir()) - }, defaultTimeout) - - it(`${nodeId} should not exist`, async () => { - try { - await expect(nodeCmd.checkNetworkNodePod(namespace, nodeId, 10, 50)).rejects.toThrowError(`no pod found for nodeId: ${nodeId}`) - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } finally { - await nodeCmd.close() - } - }, 180000) - - balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) - accountCreationShouldSucceed(accountManager, nodeCmd, namespace) - - it('should succeed with init command', async () => { - const status = await accountCmd.init(argv) - expect(status).toBeTruthy() - }, 450000) - - it(`add ${nodeId} to the network`, async () => { - try { - await expect(nodeCmd.add(argv)).resolves.toBeTruthy() - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } finally { - await nodeCmd.close() - await sleep(10000) // sleep to wait for node to finish starting + function nodeShouldNotBeActive (nodeCmd, nodeId) { + it(`${nodeId} should not be ACTIVE`, async () => { + expect(2) + try { + await expect( + nodeCmd.checkNetworkNodeState(nodeId, + 5)).rejects.toThrowError() + } catch (e) { + expect(e).not.toBeNull() + } finally { + await nodeCmd.close() + } + }, defaultTimeout) } - }, 600000) - - balanceQueryShouldSucceed(accountManager, nodeCmd, namespace) - - accountCreationShouldSucceed(accountManager, nodeCmd, namespace) - - it('existing nodes private keys should not have changed', async () => { - const currentNodeIdsPrivateKeysHash = await getNodeIdsPrivateKeysHash(existingServiceMap, namespace, keyFormat, k8, getTmpDir()) - for (const [nodeId, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { - const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeId) - - for (const [keyFileName, existingKeyHash] of existingKeyHashMap.entries()) { - expect(`${nodeId}:${keyFileName}:${currentNodeKeyHashMap.get(keyFileName)}`).toEqual( - `${nodeId}:${keyFileName}:${existingKeyHash}`) + async function nodeRefreshTestSetup (argv, testName, k8, nodeId) { + argv[flags.nodeIDs.name] = nodeId + const configManager = getTestConfigManager(`${testName}-solo.config`) + configManager.update(argv, true) + + const podArray = await k8.getPodsByLabel( + [`app=network-${nodeId}`, + 'fullstack.hedera.com/type=network-node']) + + if (podArray.length > 0) { + const podName = podArray[0].metadata.name + k8.logger.info(`nodeRefreshTestSetup: podName: ${podName}`) + return podName + } else { + throw new Error(`pod for ${nodeId} not found`) } } - }, defaultTimeout) - }) - }) - - function 
accountCreationShouldSucceed (accountManager, nodeCmd, namespace) { - it('Account creation should succeed', async () => { - expect.assertions(3) - - try { - await accountManager.loadNodeClient(namespace) - expect(accountManager._nodeClient).not.toBeNull() - const privateKey = PrivateKey.generate() - const amount = 100 - - const newAccount = await new AccountCreateTransaction() - .setKey(privateKey) - .setInitialBalance(Hbar.from(amount, HbarUnit.Hbar)) - .execute(accountManager._nodeClient) - - // Get the new account ID - const getReceipt = await newAccount.getReceipt(accountManager._nodeClient) - const accountInfo = { - accountId: getReceipt.accountId.toString(), - privateKey: privateKey.toString(), - publicKey: privateKey.publicKey.toString(), - balance: amount - } - - expect(accountInfo.accountId).not.toBeNull() - expect(accountInfo.balance).toEqual(amount) - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } - }, defaultTimeout) - } - - function nodePodShouldBeRunning (nodeCmd, namespace, nodeId) { - it(`${nodeId} should be running`, async () => { - try { - await expect(nodeCmd.checkNetworkNodePod(namespace, nodeId)).resolves.toBeTruthy() - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } finally { - await nodeCmd.close() - } - }, defaultTimeout) - } - - function nodeRefreshShouldSucceed (nodeId, nodeCmd, argv) { - it(`${nodeId} refresh should succeed`, async () => { - try { - await expect(nodeCmd.refresh(argv)).resolves.toBeTruthy() - expect(nodeCmd.getUnusedConfigs(NodeCommand.REFRESH_CONFIGS_NAME)).toEqual([flags.devMode.constName]) - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } finally { - await nodeCmd.close() - await sleep(10000) // sleep to wait for node to finish starting - } - }, 1200000) - } - - function nodeShouldNotBeActive (nodeCmd, nodeId) { - it(`${nodeId} should not be ACTIVE`, async () => { - expect(2) - try { - await expect(nodeCmd.checkNetworkNodeState(nodeId, 5)).rejects.toThrowError() - } catch (e) { - expect(e).not.toBeNull() - } finally { - await nodeCmd.close() - } - }, defaultTimeout) - } - - async function nodeRefreshTestSetup (argv, testName, k8, nodeId) { - argv[flags.nodeIDs.name] = nodeId - const configManager = getTestConfigManager(`${testName}-solo.config`) - configManager.update(argv, true) - - const podArray = await k8.getPodsByLabel( - [`app=network-${nodeId}`, 'fullstack.hedera.com/type=network-node']) - - if (podArray.length > 0) { - const podName = podArray[0].metadata.name - k8.logger.info(`nodeRefreshTestSetup: podName: ${podName}`) - return podName - } else { - throw new Error(`pod for ${nodeId} not found`) - } - } - - async function getNodeIdsPrivateKeysHash (networkNodeServicesMap, namespace, keyFormat, k8, destDir) { - const dataKeysDir = `${constants.HEDERA_HAPI_PATH}/data/keys` - const tlsKeysDir = constants.HEDERA_HAPI_PATH - const nodeKeyHashMap = new Map() - for (const networkNodeServices of networkNodeServicesMap.values()) { - const keyHashMap = new Map() - const nodeId = networkNodeServices.nodeName - const uniqueNodeDestDir = path.join(destDir, nodeId) - if (!fs.existsSync(uniqueNodeDestDir)) { - fs.mkdirSync(uniqueNodeDestDir, { recursive: true }) - } - switch (keyFormat) { - case constants.KEY_FORMAT_PFX: - await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, Templates.renderGossipPfxPrivateKeyFile(nodeId)) - break - case constants.KEY_FORMAT_PEM: - await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, 
Templates.renderGossipPemPrivateKeyFile(constants.SIGNING_KEY_PREFIX, nodeId)) - await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, Templates.renderGossipPemPrivateKeyFile(constants.AGREEMENT_KEY_PREFIX, nodeId)) - break - default: - throw new Error(`invalid keyFormat: ${keyFormat}`) - } - await addKeyHashToMap(k8, nodeId, tlsKeysDir, uniqueNodeDestDir, keyHashMap, 'hedera.key') - nodeKeyHashMap.set(nodeId, keyHashMap) - } - return nodeKeyHashMap - } - - async function addKeyHashToMap (k8, nodeId, keyDir, uniqueNodeDestDir, keyHashMap, privateKeyFileName) { - await k8.copyFrom( - Templates.renderNetworkPodName(nodeId), - ROOT_CONTAINER, - path.join(keyDir, privateKeyFileName), - uniqueNodeDestDir) - const keyBytes = await fs.readFileSync(path.join(uniqueNodeDestDir, privateKeyFileName)) - const keyString = keyBytes.toString() - keyHashMap.set(privateKeyFileName, crypto.createHash('sha256').update(keyString).digest('base64')) - } + }) } diff --git a/test/e2e/setup-e2e.sh b/test/e2e/setup-e2e.sh index 57da9e452..f5f8c68e7 100755 --- a/test/e2e/setup-e2e.sh +++ b/test/e2e/setup-e2e.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash readonly KIND_IMAGE="kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72" echo "SOLO_FST_CHARTS_DIR: ${SOLO_FST_CHARTS_DIR}" +export PATH=${PATH}:~/.solo/bin SOLO_CLUSTER_NAME=solo-e2e SOLO_NAMESPACE=solo-e2e @@ -20,4 +21,3 @@ kind create cluster -n "${SOLO_CLUSTER_NAME}" --image "${KIND_IMAGE}" || exit 1 solo init --namespace "${SOLO_NAMESPACE}" -i node0 -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" -d "${SOLO_FST_CHARTS_DIR}" --dev || exit 1 # cache args for subsequent commands solo cluster setup || exit 1 helm list --all-namespaces -solo network deploy || exit 1 diff --git a/test/test_util.js b/test/test_util.js index e39dd2703..10816f608 100644 --- a/test/test_util.js +++ b/test/test_util.js @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
* + * @jest-environment steps */ import { afterAll, beforeAll, describe, expect, it } from '@jest/globals' import fs from 'fs' @@ -39,14 +40,21 @@ import { KeyManager, logging, PackageDownloader, - PlatformInstaller, ProfileManager, + PlatformInstaller, ProfileManager, Templates, Zippy } from '../src/core/index.mjs' -import { AccountBalanceQuery } from '@hashgraph/sdk' +import { + AccountBalanceQuery, + AccountCreateTransaction, Hbar, HbarUnit, + PrivateKey +} from '@hashgraph/sdk' +import { ROOT_CONTAINER } from '../src/core/constants.mjs' +import crypto from 'crypto' +import { AccountCommand } from '../src/commands/account.mjs' export const testLogger = logging.NewLogger('debug', true) export const TEST_CLUSTER = 'solo-e2e' -export const HEDERA_PLATFORM_VERSION_TAG = 'v0.49.0-alpha.2' +export const HEDERA_PLATFORM_VERSION_TAG = 'v0.53.0-release-0.53.xff7c43d' export function getTestCacheDir (testName) { const baseDir = 'test/data/tmp' @@ -93,13 +101,15 @@ export function getDefaultArgv () { * @param clusterCmdArg an instance of command/ClusterCommand * @param networkCmdArg an instance of command/NetworkCommand * @param nodeCmdArg an instance of command/NodeCommand + * @param accountCmdArg an instance of command/AccountCommand */ export function bootstrapTestVariables (testName, argv, k8Arg = null, initCmdArg = null, clusterCmdArg = null, networkCmdArg = null, - nodeCmdArg = null + nodeCmdArg = null, + accountCmdArg = null ) { const namespace = argv[flags.namespace.name] || 'bootstrap-ns' const cacheDir = argv[flags.cacheDir.name] || getTestCacheDir(testName) @@ -139,6 +149,7 @@ export function bootstrapTestVariables (testName, argv, const clusterCmd = clusterCmdArg || new ClusterCommand(opts) const networkCmd = networkCmdArg || new NetworkCommand(opts) const nodeCmd = nodeCmdArg || new NodeCommand(opts) + const accountCmd = accountCmdArg || new AccountCommand(opts, constants.SHORTER_SYSTEM_ACCOUNTS) return { namespace, opts, @@ -146,7 +157,8 @@ export function bootstrapTestVariables (testName, argv, initCmd, clusterCmd, networkCmd, - nodeCmd + nodeCmd, + accountCmd } } } @@ -161,15 +173,19 @@ export function bootstrapTestVariables (testName, argv, * @param clusterCmdArg an instance of command/ClusterCommand * @param networkCmdArg an instance of command/NetworkCommand * @param nodeCmdArg an instance of command/NodeCommand + * @param accountCmdArg an instance of command/AccountCommand + * @param startNodes start nodes after deployment, default is true */ export function bootstrapNetwork (testName, argv, k8Arg = null, initCmdArg = null, clusterCmdArg = null, networkCmdArg = null, - nodeCmdArg = null + nodeCmdArg = null, + accountCmdArg = null, + startNodes = true ) { - const bootstrapResp = bootstrapTestVariables(testName, argv, k8Arg, initCmdArg, clusterCmdArg, networkCmdArg, nodeCmdArg) + const bootstrapResp = bootstrapTestVariables(testName, argv, k8Arg, initCmdArg, clusterCmdArg, networkCmdArg, nodeCmdArg, accountCmdArg) const namespace = bootstrapResp.namespace const initCmd = bootstrapResp.cmd.initCmd const k8 = bootstrapResp.opts.k8 @@ -219,34 +235,37 @@ export function bootstrapNetwork (testName, argv, ]) }, 180000) - it('should succeed with node setup command', async () => { - expect.assertions(2) - try { - await expect(nodeCmd.setup(argv)).resolves.toBeTruthy() - expect(nodeCmd.getUnusedConfigs(NodeCommand.SETUP_CONFIGS_NAME)).toEqual([ - flags.apiPermissionProperties.constName, - flags.appConfig.constName, - flags.applicationProperties.constName, - 
flags.bootstrapProperties.constName, - flags.devMode.constName, - flags.log4j2Xml.constName, - flags.settingTxt.constName - ]) - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } - }, 240000) + if (startNodes) { + it('should succeed with node setup command', async () => { + expect.assertions(2) + try { + await expect(nodeCmd.setup(argv)).resolves.toBeTruthy() + expect(nodeCmd.getUnusedConfigs(NodeCommand.SETUP_CONFIGS_NAME)).toEqual([ + flags.apiPermissionProperties.constName, + flags.appConfig.constName, + flags.applicationProperties.constName, + flags.bootstrapProperties.constName, + flags.devMode.constName, + flags.localBuildPath.constName, + flags.log4j2Xml.constName, + flags.settingTxt.constName + ]) + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() + } + }, 240000) - it('should succeed with node start command', async () => { - expect.assertions(1) - try { - await expect(nodeCmd.start(argv)).resolves.toBeTruthy() - } catch (e) { - nodeCmd.logger.showUserError(e) - expect(e).toBeNull() - } - }, 1800000) + it('should succeed with node start command', async () => { + expect.assertions(1) + try { + await expect(nodeCmd.start(argv)).resolves.toBeTruthy() + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() + } + }, 1800000) + } }) return bootstrapResp @@ -273,3 +292,75 @@ export function balanceQueryShouldSucceed (accountManager, cmd, namespace) { await sleep(1000) }, 120000) } + +export function accountCreationShouldSucceed (accountManager, nodeCmd, namespace) { + it('Account creation should succeed', async () => { + expect.assertions(3) + + try { + await accountManager.loadNodeClient(namespace) + expect(accountManager._nodeClient).not.toBeNull() + const privateKey = PrivateKey.generate() + const amount = 100 + + const newAccount = await new AccountCreateTransaction() + .setKey(privateKey) + .setInitialBalance(Hbar.from(amount, HbarUnit.Hbar)) + .execute(accountManager._nodeClient) + + // Get the new account ID + const getReceipt = await newAccount.getReceipt(accountManager._nodeClient) + const accountInfo = { + accountId: getReceipt.accountId.toString(), + privateKey: privateKey.toString(), + publicKey: privateKey.publicKey.toString(), + balance: amount + } + + expect(accountInfo.accountId).not.toBeNull() + expect(accountInfo.balance).toEqual(amount) + } catch (e) { + nodeCmd.logger.showUserError(e) + expect(e).toBeNull() + } + }, 120000) +} + +export async function getNodeIdsPrivateKeysHash (networkNodeServicesMap, namespace, keyFormat, k8, destDir) { + const dataKeysDir = path.join(constants.HEDERA_HAPI_PATH, 'data', 'keys') + const tlsKeysDir = constants.HEDERA_HAPI_PATH + const nodeKeyHashMap = new Map() + for (const networkNodeServices of networkNodeServicesMap.values()) { + const keyHashMap = new Map() + const nodeId = networkNodeServices.nodeName + const uniqueNodeDestDir = path.join(destDir, nodeId) + if (!fs.existsSync(uniqueNodeDestDir)) { + fs.mkdirSync(uniqueNodeDestDir, { recursive: true }) + } + switch (keyFormat) { + case constants.KEY_FORMAT_PFX: + await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, Templates.renderGossipPfxPrivateKeyFile(nodeId)) + break + case constants.KEY_FORMAT_PEM: + await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, Templates.renderGossipPemPrivateKeyFile(constants.SIGNING_KEY_PREFIX, nodeId)) + await addKeyHashToMap(k8, nodeId, dataKeysDir, uniqueNodeDestDir, keyHashMap, 
Templates.renderGossipPemPrivateKeyFile(constants.AGREEMENT_KEY_PREFIX, nodeId)) + break + default: + throw new Error(`invalid keyFormat: ${keyFormat}`) + } + await addKeyHashToMap(k8, nodeId, tlsKeysDir, uniqueNodeDestDir, keyHashMap, 'hedera.key') + nodeKeyHashMap.set(nodeId, keyHashMap) + } + return nodeKeyHashMap +} + +async function addKeyHashToMap (k8, nodeId, keyDir, uniqueNodeDestDir, keyHashMap, privateKeyFileName) { + await k8.copyFrom( + Templates.renderNetworkPodName(nodeId), + ROOT_CONTAINER, + path.join(keyDir, privateKeyFileName), + uniqueNodeDestDir) + const keyBytes = fs.readFileSync(path.join(uniqueNodeDestDir, privateKeyFileName)) + const keyString = keyBytes.toString() + keyHashMap.set(privateKeyFileName, crypto.createHash('sha256').update(keyString).digest('base64')) +} diff --git a/version.mjs b/version.mjs index 202a59374..75b7c406f 100644 --- a/version.mjs +++ b/version.mjs @@ -21,5 +21,5 @@ export const JAVA_VERSION = '21.0.1+12' export const HELM_VERSION = 'v3.14.2' -export const FST_CHART_VERSION = 'v0.29.0' -export const HEDERA_PLATFORM_VERSION = 'v0.49.0-alpha.2' +export const FST_CHART_VERSION = 'v0.29.1' +export const HEDERA_PLATFORM_VERSION = 'v0.53.0-release-0.53.xff7c43d'
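Usage note (illustrative, not part of the commit above): the reworked bootstrapNetwork helper now accepts optional command instances plus a trailing startNodes flag. A minimal sketch of a suite that deploys a network without starting nodes, as the AccountManager and PackageInstallerE2E suites do; the suite name, namespace, and relative import paths are assumptions:

import { afterAll, describe } from '@jest/globals'
import { flags } from '../../src/commands/index.mjs'
import { bootstrapNetwork, getDefaultArgv, TEST_CLUSTER } from '../test_util.js'
import * as version from '../../version.mjs'

describe('ExampleE2E', () => {
  const namespace = 'example-e2e' // hypothetical test namespace
  const argv = getDefaultArgv()
  argv[flags.namespace.name] = namespace
  argv[flags.nodeIDs.name] = 'node0'
  argv[flags.clusterName.name] = TEST_CLUSTER
  argv[flags.fstChartVersion.name] = version.FST_CHART_VERSION
  // trailing positionals are k8Arg, initCmdArg, clusterCmdArg, networkCmdArg,
  // nodeCmdArg, accountCmdArg, then startNodes = false to skip node setup/start
  const bootstrapResp = bootstrapNetwork(namespace, argv, undefined, undefined, undefined, undefined, undefined, undefined, false)
  const k8 = bootstrapResp.opts.k8

  afterAll(async () => {
    await k8.deleteNamespace(namespace) // tear down the namespace the bootstrap created
  }, 180000)
})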