diff --git a/.github/workflows/test-backup-restore-migration.yaml b/.github/workflows/test-backup-restore-migration.yaml
new file mode 100644
index 00000000..206844f3
--- /dev/null
+++ b/.github/workflows/test-backup-restore-migration.yaml
@@ -0,0 +1,241 @@
+---
+name: "Backup and restore test"
+
+on:
+ workflow_dispatch:
+ inputs:
+ image:
+ description: "GCP image for test cluster"
+ required: true
+ default: "almalinux-cloud/almalinux-8"
+ architecture:
+ description: "PE architecture to test"
+ required: true
+ default: "standard"
+ version:
+ description: "PE version to install"
+ required: true
+ default: "2021.7.4"
+ ssh-debugging:
+ description: "Boolean; whether or not to pause for ssh debugging"
+ required: true
+ default: "false"
+
+jobs:
+ backup:
+ name: "Backup: Cluster A: PE ${{ inputs.version }} ${{ inputs.architecture }} on ${{ inputs.image }}"
+ runs-on: ubuntu-20.04
+ env:
+ BOLT_GEM: true
+ BOLT_DISABLE_ANALYTICS: true
+ LANG: "en_US.UTF-8"
+
+ steps:
+ - name: "Start SSH session"
+ if: ${{ github.event.inputs.ssh-debugging == 'true' }}
+ uses: luchihoratiu/debug-via-ssh@main
+ with:
+ NGROK_AUTH_TOKEN: ${{ secrets.NGROK_AUTH_TOKEN }}
+ SSH_PASS: ${{ secrets.SSH_PASS }}
+
+ - name: "Checkout Source"
+ uses: actions/checkout@v2
+
+ - name: "Activate Ruby 2.7"
+ uses: ruby/setup-ruby@v1
+ with:
+ ruby-version: "2.7"
+ bundler-cache: true
+
+ - name: "Print bundle environment"
+ if: ${{ github.repository_owner == 'puppetlabs' }}
+ run: |
+ echo ::group::info:bundler
+ bundle env
+ echo ::endgroup::
+
+ - name: "Provision test cluster"
+ timeout-minutes: 15
+ run: |
+ echo ::group::prepare
+ mkdir -p $HOME/.ssh
+ echo 'Host *' > $HOME/.ssh/config
+ echo ' ServerAliveInterval 150' >> $HOME/.ssh/config
+ echo ' ServerAliveCountMax 2' >> $HOME/.ssh/config
+ bundle exec rake spec_prep
+ echo ::endgroup::
+
+ echo ::group::provision
+ bundle exec bolt plan run peadm_spec::provision_test_cluster \
+ --modulepath spec/fixtures/modules \
+ provider=provision_service \
+ image=${{ inputs.image }} \
+ architecture=${{ inputs.architecture }}
+ echo ::endgroup::
+
+ echo ::group::info:request
+ cat request.json || true; echo
+ echo ::endgroup::
+
+ echo ::group::info:inventory
+ sed -e 's/password: .*/password: "[redacted]"/' < spec/fixtures/litmus_inventory.yaml || true
+ echo ::endgroup::
+
+ # - name: Save inventory file A to an artifact
+ # uses: actions/upload-artifact@v3
+ # with:
+ # name: inventory_A
+ # path: spec/fixtures/litmus_inventory.yaml
+
+ - name: "Install PE on test cluster"
+ timeout-minutes: 120
+ run: |
+ bundle exec bolt plan run peadm_spec::install_test_cluster \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ architecture=${{ inputs.architecture }} \
+ version=${{ inputs.version }}
+
+ - name: "Start SSH session"
+ if: github.event.inputs.ssh-debugging == 'true'
+ uses: luchihoratiu/debug-via-ssh@main
+ with:
+ NGROK_AUTH_TOKEN: ${{ secrets.NGROK_AUTH_TOKEN }}
+ SSH_PASS: ${{ secrets.SSH_PASS }}
+
+ # - name: Download artifacts
+ # # if: always()
+ # uses: actions/download-artifact@v3
+ # with:
+ # path: spec/fixtures/
+
+ - name: "Perform PE backup of cluster A"
+ timeout-minutes: 10
+ continue-on-error: true
+ run: |
+ echo ::group::prepare
+ mkdir -p $HOME/.ssh
+ echo 'Host *' > $HOME/.ssh/config
+ echo ' ServerAliveInterval 150' >> $HOME/.ssh/config
+ echo ' ServerAliveCountMax 2' >> $HOME/.ssh/config
+ bundle exec rake spec_prep
+ echo ::endgroup::
+
+ echo ::group::backup
+ bundle exec bolt plan run peadm_spec::test_backup \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules
+ echo ::endgroup::
+
+ - name: "Wait as long as the file ${HOME}/pause file is present"
+ continue-on-error: true
+ # if: ${{ always() && github.event.inputs.ssh-debugging == 'true' }}
+ if: github.event.inputs.ssh-debugging == 'true'
+ run: |
+ while [ -f "${HOME}/pause" ] ; do
+ echo "${HOME}/pause present, sleeping for 60 seconds..."
+ sleep 10
+ done
+ echo "${HOME}/pause absent, continuing workflow."
+
+ - name: "Tear down cluster A"
+ if: always()
+ run: |
+ if [ -f spec/fixtures/litmus_inventory.yaml ]; then
+ echo ::group::tear_down
+ bundle exec rake 'litmus:tear_down'
+ echo ::endgroup::
+
+ echo ::group::info:request
+ cat request.json || true; echo
+ echo ::endgroup::
+ fi
+
+ restore:
+ name: "Restore: Cluster B: PE ${{ inputs.version }} ${{ inputs.architecture }} on ${{ inputs.image }}"
+ runs-on: ubuntu-20.04
+ env:
+ BOLT_GEM: true
+ BOLT_DISABLE_ANALYTICS: true
+ LANG: "en_US.UTF-8"
+
+ steps:
+ - name: "Checkout Source"
+ uses: actions/checkout@v2
+
+ - name: "Activate Ruby 2.7"
+ uses: ruby/setup-ruby@v1
+ with:
+ ruby-version: "2.7"
+ bundler-cache: true
+
+ - name: "Print bundle environment"
+ if: ${{ github.repository_owner == 'puppetlabs' }}
+ run: |
+ echo ::group::info:bundler
+ bundle env
+ echo ::endgroup::
+
+ - name: "Provision test cluster"
+ timeout-minutes: 15
+ run: |
+ echo ::group::prepare
+ mkdir -p $HOME/.ssh
+ echo 'Host *' > $HOME/.ssh/config
+ echo ' ServerAliveInterval 150' >> $HOME/.ssh/config
+ echo ' ServerAliveCountMax 2' >> $HOME/.ssh/config
+ bundle exec rake spec_prep
+ echo ::endgroup::
+
+ echo ::group::provision
+ bundle exec bolt plan run peadm_spec::provision_test_cluster \
+ --modulepath spec/fixtures/modules \
+ provider=provision_service \
+ image=${{ inputs.image }} \
+ architecture=${{ inputs.architecture }}
+ echo ::endgroup::
+
+ echo ::group::info:request
+ cat request.json || true; echo
+ echo ::endgroup::
+
+ echo ::group::info:inventory
+ sed -e 's/password: .*/password: "[redacted]"/' < spec/fixtures/litmus_inventory.yaml || true
+ echo ::endgroup::
+
+ # - name: Save inventory file B to an artifact
+ # uses: actions/upload-artifact@v3
+ # with:
+ # name: inventory_B
+ # path: spec/fixtures/litmus_inventory.yaml
+
+ - name: "Install PE on test cluster"
+ timeout-minutes: 120
+ run: |
+ bundle exec bolt plan run peadm_spec::install_test_cluster \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ architecture=${{ inputs.architecture }} \
+ version=${{ inputs.version }}
+
+ - name: Wait for backup to finish
+ uses: lewagon/wait-on-check-action@v1.3.1
+ with:
+ ref: ${{ github.ref }}
+ check-name: "Backup: Cluster A: PE ${{ inputs.version }} ${{ inputs.architecture }} on ${{ inputs.image }}"
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ wait-interval: 10
+
+ - name: "Tear down cluster B"
+ if: always()
+ run: |
+ cp spec/fixtures/inventory_B/litmus_inventory.yaml spec/fixtures/litmus_inventory.yaml || true
+ if [ -f spec/fixtures/litmus_inventory.yaml ]; then
+ echo ::group::tear_down
+ bundle exec rake 'litmus:tear_down'
+ echo ::endgroup::
+
+ echo ::group::info:request
+ cat request.json || true; echo
+ echo ::endgroup::
+ fi
diff --git a/.github/workflows/test-backup-restore.yaml b/.github/workflows/test-backup-restore.yaml
index 30990cc8..e7fce6de 100644
--- a/.github/workflows/test-backup-restore.yaml
+++ b/.github/workflows/test-backup-restore.yaml
@@ -2,45 +2,47 @@
name: "Backup and restore test"
on:
+ pull_request:
+ types: [ready_for_review]
+
workflow_dispatch:
inputs:
image:
- description: 'GCP image for test cluster'
+ description: "GCP image for test cluster"
required: true
- default: 'almalinux-cloud/almalinux-8'
+ default: "almalinux-cloud/almalinux-8"
architecture:
- description: 'PE architecture to test'
+ description: "PE architecture to test"
required: true
- default: 'standard'
+ default: "standard"
+ type: choice
+ options:
+ - standard
+ - standard-with-dr
+ - large
+ - large-with-dr
+ - extra-large
+ - extra-large-with-dr
version:
- description: 'PE version to install'
+ description: "PE version to install"
required: true
- default: '2021.7.8'
+ default: "2023.5.0"
ssh-debugging:
- description: 'Boolean; whether or not to pause for ssh debugging'
+ description: "Boolean; whether or not to pause for ssh debugging"
required: true
- default: 'false'
+ default: "false"
jobs:
- test-backup-restore:
- name: "PE ${{ matrix.version }} ${{ matrix.architecture }} on ${{ matrix.image }}"
+ backup-restore-test:
+ name: "Backup, break and restore cluster: PE ${{ github.event.inputs.version || '2023.5.0' }} ${{ github.event.inputs.architecture || 'extra-large' }} on ${{ github.event.inputs.image || 'almalinux-cloud/almalinux-8' }}"
runs-on: ubuntu-20.04
env:
BOLT_GEM: true
BOLT_DISABLE_ANALYTICS: true
- LANG: 'en_US.UTF-8'
- strategy:
- fail-fast: false
- matrix:
- architecture:
- - "${{ github.event.inputs.architecture }}"
- version:
- - "${{ github.event.inputs.version }}"
- image:
- - "${{ github.event.inputs.image }}"
+ LANG: "en_US.UTF-8"
steps:
- - name: 'Start SSH session'
+ - name: "Start SSH session"
if: ${{ github.event.inputs.ssh-debugging == 'true' }}
uses: luchihoratiu/debug-via-ssh@main
with:
@@ -63,7 +65,7 @@ jobs:
bundle env
echo ::endgroup::
- - name: 'Provision test cluster'
+ - name: "Provision test cluster"
timeout-minutes: 15
run: |
echo ::group::prepare
@@ -75,11 +77,11 @@ jobs:
echo ::endgroup::
echo ::group::provision
- bundle exec bolt plan run peadm_spec::provision_test_cluster \
- --modulepath spec/fixtures/modules \
- provider=provision_service \
- image=${{ matrix.image }} \
- architecture=${{ matrix.architecture }}
+ bundle exec bolt plan run peadm_spec::provision_test_cluster \
+ --modulepath spec/fixtures/modules \
+ provider=provision_service \
+ image=${{ github.event.inputs.image || 'almalinux-cloud/almalinux-8' }} \
+ architecture=${{ github.event.inputs.architecture || 'extra-large' }}
echo ::endgroup::
echo ::group::info:request
@@ -90,27 +92,161 @@ jobs:
sed -e 's/password: .*/password: "[redacted]"/' < spec/fixtures/litmus_inventory.yaml || true
echo ::endgroup::
- - name: 'Install PE on test cluster'
+ - name: "Install PE on test cluster"
timeout-minutes: 120
run: |
bundle exec bolt plan run peadm_spec::install_test_cluster \
--inventoryfile spec/fixtures/litmus_inventory.yaml \
--modulepath spec/fixtures/modules \
- architecture=${{ matrix.architecture }} \
- version=${{ matrix.version }}
+ architecture=${{ github.event.inputs.architecture || 'extra-large' }} \
+ version=${{ github.event.inputs.version || '2023.5.0' }} \
+ --stream
+
+ - name: Perform peadm backup of cluster
+ timeout-minutes: 10
+ continue-on-error: true
+ run: |
+ echo ::group::prepare
+ mkdir -p $HOME/.ssh
+ echo 'Host *' > $HOME/.ssh/config
+ echo ' ServerAliveInterval 150' >> $HOME/.ssh/config
+ echo ' ServerAliveCountMax 2' >> $HOME/.ssh/config
+ bundle exec rake spec_prep
+ echo ::endgroup::
+
+ echo ::group::backup
+ bundle exec bolt plan run peadm_spec::test_backup \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream
+ echo ::endgroup::
+
+ - name: Set up yq
+ uses: frenck/action-setup-yq@v1
+ with:
+ version: v4.30.5
+
+ - name: Break the primary host
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ echo "Removing ssl directories"
+ bundle exec bolt command run "rm -rf /etc/puppetlabs/puppetserver/ca /etc/puppetlabs/puppet/ssl" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+ echo "Removing classifier database"
+ bundle exec bolt command run "rm -rf /opt/puppetlabs/server/data/postgresql/classifier" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+ #TODO if arch is XL, run pe-uninstaller on the primary database
+
+ - name: Output PE cluster status
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt command run "puppet infrastructure status" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+
+ - name: Reinstall PE on the primary host
+ continue-on-error: true
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt task run peadm::reinstall_pe uninstall=true version=${{ github.event.inputs.version || '2023.5.0' }} -t $primary \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --verbose \
+ --stream
- - name: 'Wait as long as the file ${HOME}/pause file is present'
+ - name: Perform peadm recovery restore of primary server
+ timeout-minutes: 30
+ continue-on-error: true
+ run: |
+ echo ::group::prepare
+ mkdir -p $HOME/.ssh
+ echo 'Host *' > $HOME/.ssh/config
+ echo ' ServerAliveInterval 150' >> $HOME/.ssh/config
+ echo ' ServerAliveCountMax 2' >> $HOME/.ssh/config
+ bundle exec rake spec_prep
+ echo ::endgroup::
+
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+
+ #TODO update the restore to:
+ # - restore the puppetdb in the standard cases
+ # - not restore the puppetdb if there is a broken external db
+ echo ::group::restore
+ bundle exec bolt plan run peadm_spec::test_restore \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream \
+ || true # ignore errors
+ echo ::endgroup::
+
+ - name: Output PE cluster status
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt command run "puppet infrastructure status" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+
+ - name: Smoke test
+ run: |
+ bundle exec bolt plan run peadm_spec::puppet_run_test \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream
+
+ - name: Break external DB
+ if: ${{ (github.event.inputs.architecture || 'extra-large') == 'extra-large' }}
+ run: |
+ echo "Uninstalling PE from primary database"
+ primary_db=$(yq '.groups[].targets[] | select(.vars.role == "primary-pdb-postgresql") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt command run "/opt/puppetlabs/bin/puppet-enterprise-uninstaller -p -d -y || true" -t $primary_db \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --verbose \
+ --stream
+
+ - name: Output PE cluster status
+ if: ${{ (github.event.inputs.architecture || 'extra-large') == 'extra-large' }}
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt command run "puppet infrastructure status" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+
+ - name: Set up primary DB in XL
+ if: ${{ (github.event.inputs.architecture || 'extra-large') == 'extra-large' }}
+ run: |
+ primary_db=$(yq '.groups[].targets[] | select(.vars.role == "primary-pdb-postgresql") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt plan run peadm::util::init_db_server db_host=$primary_db \
+ install_pe=true \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream \
+ || true # ignore errors
+
+ # restore the puppetdb database
+ bundle exec bolt plan run peadm_spec::test_restore restore_type="recovery-db" \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream
+
+ - name: Output PE cluster status
+ if: ${{ (github.event.inputs.architecture || 'extra-large') == 'extra-large' }}
+ run: |
+ primary=$(yq '.groups[].targets[] | select(.vars.role == "primary") | .uri' spec/fixtures/litmus_inventory.yaml)
+ bundle exec bolt command run "puppet infrastructure status" -t $primary --inventoryfile spec/fixtures/litmus_inventory.yaml
+
+ - name: Smoke test
+ if: ${{ (github.event.inputs.architecture || 'extra-large') == 'extra-large' }}
+ run: |
+ bundle exec bolt plan run peadm_spec::puppet_run_test \
+ --inventoryfile spec/fixtures/litmus_inventory.yaml \
+ --modulepath spec/fixtures/modules \
+ --stream
+
+ - name: "Wait as long as the file ${HOME}/pause file is present"
+ continue-on-error: true
if: ${{ always() && github.event.inputs.ssh-debugging == 'true' }}
run: |
while [ -f "${HOME}/pause" ] ; do
echo "${HOME}/pause present, sleeping for 60 seconds..."
- sleep 60
+ sleep 10
done
echo "${HOME}/pause absent, continuing workflow."
- - name: 'Tear down test cluster'
- if: ${{ always() }}
- continue-on-error: true
+ - name: "Tear down cluster"
+ if: always()
run: |
if [ -f spec/fixtures/litmus_inventory.yaml ]; then
echo ::group::tear_down
diff --git a/.ruby-version b/.ruby-version
new file mode 100644
index 00000000..6a81b4c8
--- /dev/null
+++ b/.ruby-version
@@ -0,0 +1 @@
+2.7.8
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
deleted file mode 100644
index 6b391843..00000000
--- a/.vscode/extensions.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "recommendations": [
- "puppet.puppet-vscode",
- "Shopify.ruby-lsp",
- ]
-}
\ No newline at end of file
diff --git a/Gemfile b/Gemfile
index 0332b5d7..d1825623 100644
--- a/Gemfile
+++ b/Gemfile
@@ -35,7 +35,7 @@ group :development do
gem "rubocop-performance", '= 1.16.0', require: false
gem "rubocop-rspec", '= 2.19.0', require: false
gem "rb-readline", '= 0.5.5', require: false, platforms: [:mswin, :mingw, :x64_mingw]
- gem "bolt", '>= 3.10.0', require: false
+ gem "bolt", '>= 3.27.2', require: false
gem "github_changelog_generator", '1.16.4', require: false
gem "octokit", '4.21.0', require: false
end
diff --git a/Puppetfile b/Puppetfile
new file mode 100644
index 00000000..9cbccda3
--- /dev/null
+++ b/Puppetfile
@@ -0,0 +1,9 @@
+# This Puppetfile is managed by Bolt. Do not edit.
+# For more information, see https://pup.pt/bolt-modules
+
+# The following directive installs modules to the managed moduledir.
+moduledir '.modules'
+
+mod 'WhatsARanjit/node_manager', '0.7.5'
+mod 'puppet/format', '1.1.1'
+mod 'puppetlabs/stdlib', '9.5.0'
diff --git a/REFERENCE.md b/REFERENCE.md
index 12179ffe..ac9180c5 100644
--- a/REFERENCE.md
+++ b/REFERENCE.md
@@ -31,9 +31,11 @@
* [`peadm::generate_pe_conf`](#peadm--generate_pe_conf): Generate a pe.conf file in JSON format
* [`peadm::get_pe_conf`](#peadm--get_pe_conf)
* [`peadm::get_targets`](#peadm--get_targets): Accept undef or a SingleTargetSpec, and return an Array[Target, 1, 0]. This differs from get_target() in that: - It returns an Array[Target
+* [`peadm::migration_opts_default`](#peadm--migration_opts_default)
* [`peadm::node_manager_yaml_location`](#peadm--node_manager_yaml_location)
* [`peadm::oid`](#peadm--oid)
* [`peadm::plan_step`](#peadm--plan_step)
+* [`peadm::recovery_opts_all`](#peadm--recovery_opts_all)
* [`peadm::recovery_opts_default`](#peadm--recovery_opts_default)
* [`peadm::update_pe_conf`](#peadm--update_pe_conf): Update the pe.conf file on a target with the provided hash
* [`peadm::wait_until_service_ready`](#peadm--wait_until_service_ready): A convenience function to help remember port numbers for services and handle running the wait_until_service_ready task
@@ -73,6 +75,7 @@
* [`puppet_runonce`](#puppet_runonce): Run the Puppet agent one time
* [`rbac_token`](#rbac_token): Get and save an rbac token for the root user, admin rbac user
* [`read_file`](#read_file): Read the contents of a file
+* [`reinstall_pe`](#reinstall_pe): Reinstall PE, only to be used to restore PE
* [`restore_classification`](#restore_classification): A short description of this task
* [`sign_csr`](#sign_csr): Submit a certificate signing request
* [`ssl_clean`](#ssl_clean): Clean an agent's certificate
@@ -85,13 +88,16 @@
#### Public Plans
* [`peadm::add_database`](#peadm--add_database)
+* [`peadm::backup`](#peadm--backup): Backup puppet primary configuration
* [`peadm::backup_ca`](#peadm--backup_ca)
* [`peadm::convert`](#peadm--convert): Convert an existing PE cluster to a PEAdm-managed cluster
* [`peadm::install`](#peadm--install): Install a new PE cluster
* [`peadm::modify_certificate`](#peadm--modify_certificate): Modify the certificate of one or more targets
+* [`peadm::restore`](#peadm--restore): Restore puppet primary configuration
* [`peadm::restore_ca`](#peadm--restore_ca)
* [`peadm::status`](#peadm--status): Return status information from one or more PE clusters in a table format
* [`peadm::upgrade`](#peadm--upgrade): Upgrade a PEAdm-managed cluster
+* [`peadm::util::init_db_server`](#peadm--util--init_db_server)
#### Private Plans
@@ -756,6 +762,18 @@ Data type: `Optional[Integer[1,1]]`
+### `peadm::migration_opts_default`
+
+Type: Puppet Language
+
+The peadm::migration_opts_default function.
+
+#### `peadm::migration_opts_default()`
+
+The peadm::migration_opts_default function.
+
+Returns: `Any`
+
### `peadm::node_manager_yaml_location`
Type: Ruby 4.x API
@@ -810,6 +828,18 @@ Data type: `Callable`
+### `peadm::recovery_opts_all`
+
+Type: Puppet Language
+
+The peadm::recovery_opts_all function.
+
+#### `peadm::recovery_opts_all()`
+
+The peadm::recovery_opts_all function.
+
+Returns: `Any`
+
### `peadm::recovery_opts_default`
Type: Puppet Language
@@ -945,12 +975,14 @@ Alias of
```puppet
Struct[{
+ 'activity' => Optional[Boolean],
+ 'ca' => Optional[Boolean],
+ 'classifier' => Optional[Boolean],
+ 'code' => Optional[Boolean],
+ 'config' => Optional[Boolean],
'orchestrator' => Optional[Boolean],
'puppetdb' => Optional[Boolean],
'rbac' => Optional[Boolean],
- 'activity' => Optional[Boolean],
- 'ca' => Optional[Boolean[false]],
- 'classifier' => Optional[Boolean],
}]
```
@@ -1392,6 +1424,32 @@ Data type: `String`
Path to the file to read
+### `reinstall_pe`
+
+Reinstall PE, only to be used to restore PE
+
+**Supports noop?** false
+
+#### Parameters
+
+##### `version`
+
+Data type: `String[1]`
+
+The PE version to install
+
+##### `arch`
+
+Data type: `String[1]`
+
+The PE installation platform
+
+##### `uninstall`
+
+Data type: `Boolean`
+
+Whether we want to uninstall PE before installing
+
### `restore_classification`
A short description of this task
@@ -1541,6 +1599,57 @@ Optional[Enum[
Default value: `undef`
+### `peadm::backup`
+
+Backup puppet primary configuration
+
+#### Examples
+
+#####
+
+```puppet
+bolt plan run peadm::backup -t primary1.example.com
+```
+
+#### Parameters
+
+The following parameters are available in the `peadm::backup` plan:
+
+* [`targets`](#-peadm--backup--targets)
+* [`backup_type`](#-peadm--backup--backup_type)
+* [`backup`](#-peadm--backup--backup)
+* [`output_directory`](#-peadm--backup--output_directory)
+
+##### `targets`
+
+Data type: `Peadm::SingleTargetSpec`
+
+This should be the primary puppetserver for the puppet cluster
+
+##### `backup_type`
+
+Data type: `Enum['recovery', 'custom']`
+
+Currently, the recovery and custom backup types are supported
+
+Default value: `'recovery'`
+
+##### `backup`
+
+Data type: `Peadm::Recovery_opts`
+
+A hash of custom backup options, see the peadm::recovery_opts_default() function for the default values
+
+Default value: `{}`
+
+##### `output_directory`
+
+Data type: `String`
+
+The directory to place the backup in
+
+Default value: `'/tmp'`
+
### `peadm::backup_ca`
The peadm::backup_ca class.
@@ -2004,6 +2113,55 @@ Data type: `Boolean`
Default value: `false`
+### `peadm::restore`
+
+Restore puppet primary configuration
+
+#### Examples
+
+#####
+
+```puppet
+bolt plan run peadm::restore -t primary1.example.com input_file=/tmp/peadm-backup.tar.gz
+```
+
+#### Parameters
+
+The following parameters are available in the `peadm::restore` plan:
+
+* [`targets`](#-peadm--restore--targets)
+* [`restore_type`](#-peadm--restore--restore_type)
+* [`restore`](#-peadm--restore--restore)
+* [`input_file`](#-peadm--restore--input_file)
+
+##### `targets`
+
+Data type: `Peadm::SingleTargetSpec`
+
+This should be the primary puppetserver for the puppet cluster
+
+##### `restore_type`
+
+Data type: `Enum['recovery', 'recovery-db', 'custom']`
+
+Choose from `recovery`, `recovery-db` and `custom`
+
+Default value: `'recovery'`
+
+##### `restore`
+
+Data type: `Peadm::Recovery_opts`
+
+A hash of custom backup options, see the peadm::recovery_opts_default() function for the default values
+
+Default value: `{}`
+
+##### `input_file`
+
+Data type: `Pattern[/.*\.tar\.gz$/]`
+
+The file containing the backup to restore from
+
### `peadm::restore_ca`
The peadm::restore_ca class.
@@ -2289,3 +2447,46 @@ Optional[Enum[
Default value: `undef`
+### `peadm::util::init_db_server`
+
+The peadm::util::init_db_server class.
+
+#### Parameters
+
+The following parameters are available in the `peadm::util::init_db_server` plan:
+
+* [`db_host`](#-peadm--util--init_db_server--db_host)
+* [`install_pe`](#-peadm--util--init_db_server--install_pe)
+* [`pe_version`](#-peadm--util--init_db_server--pe_version)
+* [`pe_platform`](#-peadm--util--init_db_server--pe_platform)
+
+##### `db_host`
+
+Data type: `String[1]`
+
+
+
+##### `install_pe`
+
+Data type: `Boolean`
+
+
+
+Default value: `false`
+
+##### `pe_version`
+
+Data type: `String[1]`
+
+
+
+Default value: `'2023.5.0'`
+
+##### `pe_platform`
+
+Data type: `String[1]`
+
+
+
+Default value: `'el-8-x86_64'`
+
diff --git a/bolt-project.yaml b/bolt-project.yaml
new file mode 100644
index 00000000..e1a6d593
--- /dev/null
+++ b/bolt-project.yaml
@@ -0,0 +1,9 @@
+---
+name: peadm
+stream: true
+modules:
+ - puppetlabs/stdlib
+ - puppet/format
+ - name: node_manager
+ git: https://github.com/WhatsARanjit/puppet-node_manager
+ ref: 86cb48f27a4a0ea212b33f86775d84a374a189b7
diff --git a/documentation/backup_restore.md b/documentation/backup_restore.md
new file mode 100644
index 00000000..4ad28012
--- /dev/null
+++ b/documentation/backup_restore.md
@@ -0,0 +1,162 @@
+# Backup and restore Puppet Enterprise (PE)
+
+- [Backup and restore Puppet Enterprise (PE)](#backup-and-restore-puppet-enterprise-pe)
+ - [Introduction to PEADM backup and restore](#introduction-to-peadm-backup-and-restore)
+ - [Using `recovery` backup and restore](#using-recovery-backup-and-restore)
+ - [Using `custom` backup and restore](#using-custom-backup-and-restore)
+ - [What exactly is backed up and restored?](#what-exactly-is-backed-up-and-restored)
+ - [Recovering a primary server when some or all services are not operational](#recovering-a-primary-server-when-some-or-all-services-are-not-operational)
+ - [Recovering a non-operational database server in an extra-large installation](#recovering-a-non-operational-database-server-in-an-extra-large-installation)
+
+## Introduction to PEADM backup and restore
+
+If your PE installation is managed by PEADM, you can back up and restore PE using this process:
+1. Use the `peadm::backup` plan to create a backup of your primary server.
+2. Use the `peadm::restore` plan to restore PE from a `peadm::backup`.
+
+**Important:** If your PE installation is not managed by PEADM, you cannot use the `peadm::backup` and `peadm::restore` plans. For information about converting to a PEADM-managed installation, see [Convert](https://github.com/puppetlabs/puppetlabs-peadm/blob/main/documentation/convert.md).
+
+You can specify the type of backup or restore plan you want to use. There are two types:
+- `recovery`: Use this type to create a full backup of your primary server, including data for all services. The recovery option allows you to restore your primary server and all services (including database services running on external servers) to the exact state they were in at the time of the backup.
+- `custom`: Use this type when you want to back up and restore data for specific services.
+
+If no type is specified, the default is `recovery`.
+
+**Important**: When restoring your installation, the hostname of the primary server you are restoring to _must be the same as_ the hostname of the primary server you created the backup from.
+You cannot successfully restore your installation if you change the hostname of your primary server during the recovery process.
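+
+To confirm this constraint is met before restoring, you can compare the FQDN reported by the replacement machine with the primary certname recorded in the backup's `peadm/peadm_config.json`. A minimal sketch (the hostname is a placeholder):
+```
+bolt command run 'hostname -f' --targets my.primary.vm
+```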
+
+## Using `recovery` backup and restore
+
+When you run a `recovery` backup plan, the primary server configuration is backed up in full. In the event of a primary server failure, this backup can be used to restore your primary server and all services (including database services running on external servers) to the exact state they were in at the time of the backup.
+
+### Create a `recovery` backup
+Run one of the following commands:
+- To create the backup file in the default location, run the `peadm::backup` plan, including the `--targets` option to specify the hostname (FQDN) of your primary server:
+```
+bolt plan run peadm::backup --targets my.primary.vm backup_type=recovery
+```
+- Alternatively, because `recovery` is the default type, you can use this simplified command:
+```
+bolt plan run peadm::backup --targets my.primary.vm
+```
+- To place the backup file in a custom location, define the `output_directory` parameter. For example:
+```
+bolt plan run peadm::backup --targets my.primary.vm backup_type=recovery output_directory=/custom_path
+```
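+When the backup finishes, the plan returns the path to the generated archive; based on `plans/backup.pp` in this change, the return value looks like the following (the timestamp is illustrative):
+```
+{ "path": "/tmp/pe-backup-2024-01-01T000000Z.tar.gz" }
+```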
+### Restore your installation from a `recovery` backup
+Run the `peadm::restore` plan, including the `--targets` option to specify the hostname (FQDN) of your primary server and the `input_file` parameter to specify the path to the backup file you want to use. For example:
+```
+bolt plan run peadm::restore --targets my.primary.vm input_file="/tmp/my_backup.tar.gz"
+```
+**Note**: Restoring from a `recovery` backup restarts any services that are unavailable on the primary server.
+
+## Using `custom` backup and restore
+### Create a `custom` backup
+To customize the items you back up, first create a JSON file in which you define the `backup_type` parameter as `custom` and define the `backup` parameter by specifying which items you want to exclude. For example:
+```
+{
+ "backup_type" : "custom",
+ "backup": {
+ "activity" : false,
+ "ca" : true,
+ "classifier" : false,
+ "code" : true,
+ "config" : true,
+ "orchestrator" : false,
+ "puppetdb" : true,
+ "rbac" : false
+ }
+}
+```
+When you have created the JSON file specifying your custom backup, run the `peadm::backup` plan, including the `--params` option and specifying the relevant filename (and file path if necessary). For example:
+```
+bolt plan run peadm::backup --targets my.primary.vm --params @params.json
+```
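+
+Because the `custom` backup type merges your `backup` hash over `peadm::recovery_opts_all()` (which enables every item), any key you omit defaults to being backed up. For instance, a params file that excludes only the activity database could be this small (a sketch based on the merge in `plans/backup.pp`):
+```
+{
+ "backup_type" : "custom",
+ "backup": {
+ "activity" : false
+ }
+}
+```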
+### Restore custom items
+To customize the items you restore, create a JSON file in which you define the `restore_type` parameter as `custom`, define the `restore` parameter by specifying the items you want to exclude, and define the `input_file` parameter by specifying the path to the relevant backup file. For example:
+```
+{
+ "restore_type" : "custom",
+ "restore": {
+ "activity" : false,
+ "ca" : true,
+ "classifier" : false,
+ "code" : true,
+ "config" : true,
+ "orchestrator" : false,
+ "puppetdb" : true,
+ "rbac" : false
+ },
+ "input_file" : "/tmp/my_backup.tar.gz"
+}
+```
+
+When you have created the JSON file specifying your custom restore options, run the `peadm::restore` plan, including the `--params` option and specifying the relevant filename (and file path if necessary). For example:
+```
+bolt plan run peadm::restore --targets my.primary.vm --params @params.json
+```
+
+## What exactly is backed up and restored?
+
+The following table shows the items you can specify and indicates what is included in `recovery`:
+
+| Data or service | Explanation | Used in `recovery` |
+| --------------- | -------------------------------------------------------------------------------------------------------- | ------------------ |
+| `activity`      | Activity database                                                                                         |                    |
+| `ca`            | CA and SSL certificates                                                                                   | ✅                 |
+| `classifier`    | Classifier database. Restore merges user-defined node groups rather than overwriting system node groups. |                    |
+| `code`          | Code directory                                                                                            | ✅                 |
+| `config`        | Configuration files and databases (databases are restored literally)                                      | ✅                 |
+| `orchestrator`  | Orchestrator database and secrets                                                                         |                    |
+| `puppetdb`      | PuppetDB database (including support for XL where puppetdb is running on an external db server)           | ✅                 |
+| `rbac`          | RBAC database and secrets                                                                                 |                    |
+
+**Note**: The PEADM backup and restore plans utilize the `puppet-backup` tool for backing up and restoring `ca`, `code` and `config`. For `config`, the data backed up includes the `activity`, `classifier`, `orchestrator`, and `rbac` databases.
+
+**Note:** The output for the `peadm::backup` plan differs from the output that is returned when you manually run the [`puppet-backup create` command](https://puppet.com/docs/pe/latest/backing_up_and_restoring_pe.html#back_up_pe_infrastructure).
+
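+The backup plan produces a single gzipped tarball (it writes `pe-backup-<timestamp>.tar.gz` to the output directory and removes its working directory), containing one subdirectory per backed-up item plus a `peadm` metadata directory. To check what a given backup contains before restoring it, you can list the archive contents; the path below is a placeholder:
+```
+tar -tzf /tmp/pe-backup-2024-01-01T000000Z.tar.gz
+```
+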
+## Recovering a primary server when some or all services are not operational
+
+**Important**: To complete the recovery process outlined here, you must have a recovery backup of your primary server.
+
+If you cannot run the `recovery` restore plan directly because your primary server is not operational, you can use the following process to restore PE:
+1. On the node hosting the affected primary server, uninstall and reinstall PE, ensuring that you reinstall the same PE version. Optionally, you can use the `peadm::reinstall_pe` task as follows:
+ ```
+ bolt task run peadm::reinstall_pe --targets my.primary.vm uninstall=true version=2023.5.0
+ ```
+1. Perform a `recovery` restore of your primary server, specifying the backup file that you want to use. For example:
+ ```
+ bolt plan run peadm::restore --targets my.primary.vm input_file="/tmp/my_backup.tar.gz" restore_type=recovery
+ ```
+
+## Recovering a non-operational database server in an extra-large installation
+
+**Important**: To complete the recovery process outlined here, you must have a recovery backup of your primary server.
+
+When your primary database server is not operational, you might not be able to use the `recovery` restore directly because the puppetdb database service will not be operational. In this case, follow the steps below to restore your primary database:
+
+1. Reinstall Puppet Enterprise on the affected database server, then reconfigure and re-sign its certificate. Make sure you install the same PE version that your primary server is running.
+To do this, use the `peadm::util::init_db_server` plan as follows:
+ ```
+ bolt plan run peadm::util::init_db_server db_host=my.primary_db.vm pe_version=2023.5.0 install_pe=true pe_platform=el-8-x86_64
+ ```
+
+ This plan performs the following tasks:
+
+ 1. Cleans the current certificate for the database server from the primary server.
+ 1. Requests a new certificate for the database server with the right extensions (`peadm_role=puppet/puppetdb-database`, `peadm_availability_group=A`).
+ 1. Stops the puppetdb service on the compilers.
+ 1. Prepares a `pe.conf` file on the database server for database installation.
+ 1. Installs PE on the database server using the generated `pe.conf` file.
+ 1. Configures the database as the primary puppetdb database in the XL installation.
+ 1. Runs puppet on the compilers so that puppetdb on the compilers is reconfigured to use the new primary database server.
+ 1. Starts the puppetdb service on the compilers.
+ 1. Restarts the puppetserver service on the compilers.
+
+1. Perform a `recovery-db` restore of your database server, specifying the backup file that you want to use. For example:
+ ```
+ bolt plan run peadm::restore --targets my.primary.vm input_file="/tmp/my_backup.tar.gz" restore_type=recovery-db
+ ```
+ **Important**: You must use the `restore_type=recovery-db` parameter to recover the database server.
+
+ **Important**: You must specify the primary server host node (not the database server host node) as the target for the restore plan.
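+
+After the restore completes, you can confirm that all services are operational again by checking the cluster status from the primary server (a minimal sketch; the hostname is a placeholder):
+```
+bolt command run 'puppet infrastructure status' --targets my.primary.vm
+```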
diff --git a/functions/migration_opts_default.pp b/functions/migration_opts_default.pp
new file mode 100644
index 00000000..99b57e55
--- /dev/null
+++ b/functions/migration_opts_default.pp
@@ -0,0 +1,12 @@
+function peadm::migration_opts_default () {
+ {
+ 'activity' => true,
+ 'ca' => true,
+ 'classifier' => true,
+ 'code' => false,
+ 'config' => false,
+ 'orchestrator' => true,
+ 'puppetdb' => true,
+ 'rbac' => true,
+ }
+}
diff --git a/functions/recovery_opts_all.pp b/functions/recovery_opts_all.pp
new file mode 100644
index 00000000..e738ab5e
--- /dev/null
+++ b/functions/recovery_opts_all.pp
@@ -0,0 +1,12 @@
+function peadm::recovery_opts_all () {
+ {
+ 'activity' => true,
+ 'ca' => true,
+ 'classifier' => true,
+ 'code' => true,
+ 'config' => true,
+ 'orchestrator' => true,
+ 'puppetdb' => true,
+ 'rbac' => true,
+ }
+}
diff --git a/functions/recovery_opts_default.pp b/functions/recovery_opts_default.pp
index ef5fdfae..5e4b2f4d 100644
--- a/functions/recovery_opts_default.pp
+++ b/functions/recovery_opts_default.pp
@@ -1,10 +1,12 @@
function peadm::recovery_opts_default () {
{
- 'orchestrator' => true,
+ 'activity' => false,
+ 'ca' => true,
+ 'classifier' => false,
+ 'code' => true,
+ 'config' => true,
+ 'orchestrator' => false,
'puppetdb' => true,
- 'rbac' => true,
- 'activity' => true,
- 'ca' => false,
- 'classifier' => true,
+ 'rbac' => false,
}
}
diff --git a/plans/backup.pp b/plans/backup.pp
new file mode 100644
index 00000000..80835eb0
--- /dev/null
+++ b/plans/backup.pp
@@ -0,0 +1,198 @@
+# @summary Backup puppet primary configuration
+#
+# @param targets This should be the primary puppetserver for the puppet cluster
+# @param backup_type Currently, the recovery and custom backup types are supported
+# @param backup A hash of custom backup options, see the peadm::recovery_opts_default() function for the default values
+# @param output_directory The directory to place the backup in
+# @example
+# bolt plan run peadm::backup -t primary1.example.com
+#
+plan peadm::backup (
+ # This plan should be run on the primary server
+ Peadm::SingleTargetSpec $targets,
+
+ # backup type determines the backup options
+ Enum['recovery', 'custom'] $backup_type = 'recovery',
+
+ # Which data to backup
+ Peadm::Recovery_opts $backup = {},
+
+ # Where to put the backup folder
+ String $output_directory = '/tmp',
+) {
+ peadm::assert_supported_bolt_version()
+
+ $cluster = run_task('peadm::get_peadm_config', $targets).first.value
+ $error = getvar('cluster.error')
+ if $error {
+ fail_plan($error)
+ }
+ $arch = peadm::assert_supported_architecture(
+ getvar('cluster.params.primary_host'),
+ getvar('cluster.params.replica_host'),
+ getvar('cluster.params.primary_postgresql_host'),
+ getvar('cluster.params.replica_postgresql_host'),
+ getvar('cluster.params.compiler_hosts'),
+ )
+
+ $recovery_opts = $backup_type? {
+ 'recovery' => peadm::recovery_opts_default(),
+ 'migration' => peadm::migration_opts_default(),
+ 'custom' => peadm::recovery_opts_all() + $backup,
+ }
+
+ $timestamp = Timestamp.new().strftime('%Y-%m-%dT%H%M%SZ')
+ $backup_directory = "${output_directory}/pe-backup-${timestamp}"
+
+ $primary_target = getvar('cluster.params.primary_host')
+ $puppetdb_postgresql_target = getvar('cluster.params.primary_postgresql_host') ? {
+ undef => getvar('cluster.params.primary_host'),
+ default => getvar('cluster.params.primary_postgresql_host'),
+ }
+
+ $backup_databases = {
+ 'orchestrator' => $primary_target,
+ 'activity' => $primary_target,
+ 'rbac' => $primary_target,
+ 'puppetdb' => $puppetdb_postgresql_target,
+ }.filter |$key,$_| {
+ $recovery_opts[$key] == true
+ }
+
+ # Create backup folders
+ apply($targets) {
+ file { $backup_directory :
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => '0711',
+ }
+
+ # create a backup subdir for peadm configuration
+ file { "${backup_directory}/peadm":
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => '0711',
+ }
+
+ # backup the cluster config
+ file { "${backup_directory}/peadm/peadm_config.json":
+ content => stdlib::to_json_pretty($cluster),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+
+ # backup the recovery options
+ file { "${backup_directory}/peadm/recovery_opts.json":
+ content => stdlib::to_json_pretty($recovery_opts),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+
+ # Create a subdir for each backup type selected
+ $recovery_opts.filter |$_,$val| { $val == true }.each |$dir,$_| {
+ file { "${backup_directory}/${dir}":
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => '0711',
+ }
+ }
+
+ if $backup_type == 'recovery' {
+ # create a backup subdir for recovery configuration
+ file { "${backup_directory}/recovery":
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => '0711',
+ }
+ }
+ }
+
+ if getvar('recovery_opts.classifier') {
+ out::message('# Backing up classification')
+ run_task('peadm::backup_classification', $targets,
+ directory => "${backup_directory}/classifier",
+ )
+ }
+
+ if $backup_type == 'recovery' {
+ out::message('# Backing up ca, certs, code and config for recovery')
+# lint:ignore:strict_indent
+ run_command(@("CMD"), $targets)
+ /opt/puppetlabs/bin/puppet-backup create --dir=${shellquote($backup_directory)}/recovery --scope=certs,code,config
+ | CMD
+# lint:endignore
+ } else {
+ if getvar('recovery_opts.ca') {
+ out::message('# Backing up ca and ssl certificates')
+ # lint:ignore:strict_indent
+ run_command(@("CMD"), $targets)
+ /opt/puppetlabs/bin/puppet-backup create --dir=${shellquote($backup_directory)}/ca --scope=certs
+ | CMD
+ }
+
+ if getvar('recovery_opts.code') {
+ out::message('# Backing up code')
+ # run_command("chown pe-postgres ${shellquote($backup_directory)}/code", $targets)
+ run_command(@("CMD"), $targets)
+ /opt/puppetlabs/bin/puppet-backup create --dir=${shellquote($backup_directory)}/code --scope=code
+ | CMD
+ }
+
+ if getvar('recovery_opts.config') {
+ out::message('# Backing up config')
+ run_command("chown pe-postgres ${shellquote($backup_directory)}/config", $targets)
+ run_command(@("CMD"), $targets)
+ /opt/puppetlabs/bin/puppet-backup create --dir=${shellquote($backup_directory)}/config --scope=config
+ | CMD
+ }
+ }
+ # Check if /etc/puppetlabs/console-services/conf.d/secrets/keys.json exists and if so back it up
+ if getvar('recovery_opts.rbac') {
+ out::message('# Backing up ldap secret key if it exists')
+# lint:ignore:140chars
+ run_command(@("CMD"/L), $targets)
+ test -f /etc/puppetlabs/console-services/conf.d/secrets/keys.json \
+ && cp -rp /etc/puppetlabs/console-services/conf.d/secrets ${shellquote($backup_directory)}/rbac/ \
+ || echo "secret ldap key doesn't exist"
+ | CMD
+# lint:endignore
+ }
+# lint:ignore:140chars
+ # If backing up orchestrator, also back up the secrets in /etc/puppetlabs/orchestration-services/conf.d/secrets/
+ if getvar('recovery_opts.orchestrator') {
+ out::message('# Backing up orchestrator secret keys')
+ run_command(@("CMD"), $targets)
+ cp -rp /etc/puppetlabs/orchestration-services/conf.d/secrets ${shellquote($backup_directory)}/orchestrator/
+ | CMD
+ }
+# lint:endignore
+ $backup_databases.each |$name,$database_target| {
+ out::message("# Backing up database pe-${shellquote($name)}")
+ run_command(@("CMD"/L), $targets)
+ /opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 \
+ -f ${shellquote($backup_directory)}/${shellquote($name)}/pe-${shellquote($name)}.dump.d \
+ "sslmode=verify-ca \
+ host=${shellquote($database_target.peadm::certname())} \
+ user=pe-${shellquote($name)} \
+ sslcert=/etc/puppetlabs/puppetdb/ssl/${shellquote($primary_target.peadm::certname())}.cert.pem \
+ sslkey=/etc/puppetlabs/puppetdb/ssl/${shellquote($primary_target.peadm::certname())}.private_key.pem \
+ sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem \
+ dbname=pe-${shellquote($name)}"
+ | CMD
+ }
+
+ run_command(@("CMD"/L), $targets)
+ umask 0077 \
+ && cd ${shellquote(dirname($backup_directory))} \
+ && tar -czf ${shellquote($backup_directory)}.tar.gz ${shellquote(basename($backup_directory))} \
+ && rm -rf ${shellquote($backup_directory)}
+ | CMD
+# lint:endignore
+ return({ 'path' => "${backup_directory}.tar.gz" })
+}
diff --git a/plans/restore.pp b/plans/restore.pp
new file mode 100644
index 00000000..453bf8e5
--- /dev/null
+++ b/plans/restore.pp
@@ -0,0 +1,323 @@
+# @summary Restore puppet primary configuration
+#
+# @param targets This should be the primary puppetserver for the puppet cluster
+# @param restore_type Choose from `recovery`, `recovery-db` and `custom`
+# @param restore A hash of custom backup options, see the peadm::recovery_opts_default() function for the default values
+# @param input_file The file containing the backup to restore from
+# @example
+# bolt plan run peadm::restore -t primary1.example.com input_file=/tmp/peadm-backup.tar.gz
+#
+plan peadm::restore (
+ # This plan should be run on the primary server
+ Peadm::SingleTargetSpec $targets,
+
+ # restore type determines the restore options
+ Enum['recovery', 'recovery-db', 'custom'] $restore_type = 'recovery',
+
+ # Which data to restore
+ Peadm::Recovery_opts $restore = {},
+
+ # Path to the recovery tarball
+ Pattern[/.*\.tar\.gz$/] $input_file,
+) {
+ peadm::assert_supported_bolt_version()
+
+ $recovery_directory = "${dirname($input_file)}/${basename($input_file, '.tar.gz')}"
+# lint:ignore:strict_indent
+ run_command(@("CMD"/L), $targets)
+ umask 0077 \
+ && cd ${shellquote(dirname($recovery_directory))} \
+ && tar -xzf ${shellquote($input_file)}
+ | CMD
+# lint:endignore
+
+ # try to load the cluster configuration by running peadm::get_peadm_config, but allow for errors to happen
+ $_cluster = run_task('peadm::get_peadm_config', $targets, { '_catch_errors' => true }).first.value
+
+ if $_cluster == undef or getvar('_cluster.params') == undef {
+ # failed to get cluster config, load from backup
+ out::message('Failed to get cluster configuration, loading from backup...')
+ $result = download_file("${recovery_directory}/peadm/peadm_config.json", 'peadm_config.json', $targets).first.value
+ $cluster = loadjson(getvar('result.path'))
+ out::message('Cluster configuration loaded from backup')
+ } else {
+ $cluster = $_cluster
+ }
+
+ out::message("cluster: ${cluster}")
+
+ $error = getvar('cluster.error')
+ if $error {
+ fail_plan($error)
+ }
+
+ $arch = peadm::assert_supported_architecture(
+ getvar('cluster.params.primary_host'),
+ getvar('cluster.params.replica_host'),
+ getvar('cluster.params.primary_postgresql_host'),
+ getvar('cluster.params.replica_postgresql_host'),
+ getvar('cluster.params.compiler_hosts'),
+ )
+
+ $recovery_opts = $restore_type? {
+ 'recovery' => peadm::recovery_opts_default(),
+ 'recovery-db' => { 'puppetdb' => true, },
+ 'migration' => peadm::migration_opts_default(),
+ 'custom' => peadm::recovery_opts_all() + $restore,
+ }
+
+ $primary_target = peadm::get_targets(getvar('cluster.params.primary_host'), 1)
+ $replica_target = peadm::get_targets(getvar('cluster.params.replica_host'), 1)
+ $compiler_targets = peadm::get_targets(getvar('cluster.params.compiler_hosts'))
+
+ # Determine the array of targets to which the PuppetDB PostgreSQL database
+ # should be restored to. This could be as simple as just the primary server,
+ # or it could be two separate PostgreSQL servers.
+ $puppetdb_postgresql_targets = peadm::flatten_compact([
+ getvar('cluster.params.primary_postgresql_host') ? {
+ undef => $primary_target,
+ default => peadm::get_targets(getvar('cluster.params.primary_postgresql_host'), 1),
+ },
+ getvar('cluster.params.replica_postgresql_host') ? {
+ undef => $replica_target,
+ default => peadm::get_targets(getvar('cluster.params.replica_postgresql_host'), 1),
+ },
+ ])
+
+ $puppetdb_targets = peadm::flatten_compact([
+ $primary_target,
+ $replica_target,
+ $compiler_targets,
+ ])
+
+ # Map of recovery option name to array of database hosts to restore the
+ # relevant .dump content to.
+ $restore_databases = {
+ 'orchestrator' => [$primary_target],
+ 'activity' => [$primary_target],
+ 'rbac' => [$primary_target],
+ 'puppetdb' => $puppetdb_postgresql_targets,
+ }.filter |$key,$_| {
+ $recovery_opts[$key] == true
+ }
+
+ if getvar('recovery_opts.classifier') {
+ if $restore_type == 'migration' {
+ out::message('# Migrating classification')
+ run_task('peadm::backup_classification', $primary_target,
+ directory => $recovery_directory
+ )
+
+ run_task('peadm::transform_classification_groups', $primary_target,
+ source_directory => "${recovery_directory}/classifier",
+ working_directory => $recovery_directory
+ )
+
+ run_task('peadm::restore_classification', $primary_target,
+ classification_file => "${recovery_directory}/transformed_classification.json",
+ )
+ } else {
+ run_task('peadm::restore_classification', $primary_target,
+ classification_file => "${recovery_directory}/classifier/classification_backup.json",
+ )
+ }
+ }
+
+ if $restore_type == 'recovery' {
+ out::message('# Restoring ca, certs, code and config for recovery')
+ # lint:ignore:strict_indent
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-backup restore \
+ --scope=certs,code,config \
+ --tempdir=${shellquote($recovery_directory)} \
+ --force \
+ ${shellquote($recovery_directory)}/recovery/pe_backup-*tgz
+ | CMD
+ # lint:endignore
+ } elsif $restore_type == 'recovery-db' {
+ out::message('# Restoring primary database for recovery')
+ } else {
+ if getvar('recovery_opts.ca') {
+ out::message('# Restoring ca and ssl certificates')
+ # lint:ignore:strict_indent
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-backup restore \
+ --scope=certs \
+ --tempdir=${shellquote($recovery_directory)} \
+ --force \
+ ${shellquote($recovery_directory)}/ca/pe_backup-*tgz
+ | CMD
+ }
+
+ if getvar('recovery_opts.code') {
+ out::message('# Restoring code')
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-backup restore \
+ --scope=code \
+ --tempdir=${shellquote($recovery_directory)} \
+ --force \
+ ${shellquote($recovery_directory)}/code/pe_backup-*tgz
+ | CMD
+ }
+
+ if getvar('recovery_opts.config') {
+ out::message('# Restoring config')
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-backup restore \
+ --scope=config \
+ --tempdir=${shellquote($recovery_directory)} \
+ --force \
+ ${shellquote($recovery_directory)}/config/pe_backup-*tgz
+ | CMD
+ }
+ }
+ # Use PuppetDB's /pdb/admin/v1/archive API to SAVE data currently in PuppetDB.
+ # Otherwise we'll completely lose it if/when we restore.
+ # TODO: consider adding a heuristic to skip when inappropriate due to size
+ # or other factors.
+ if getvar('recovery_opts.puppetdb') and $restore_type == 'migration' {
+ out::message('# Exporting puppetdb')
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-db export \
+ --cert=$(/opt/puppetlabs/bin/puppet config print hostcert) \
+ --key=$(/opt/puppetlabs/bin/puppet config print hostprivkey) \
+ ${shellquote($recovery_directory)}/puppetdb-archive.bin
+ | CMD
+ }
+
+ ## shutdown services
+ run_command(@("CMD"/L), $primary_target)
+ systemctl stop pe-console-services pe-nginx pxp-agent pe-puppetserver \
+ pe-orchestration-services puppet pe-puppetdb
+ | CMD
+
+ # Restore secrets/keys.json if it exists
+ out::message('# Restoring ldap secret key if it exists')
+ run_command(@("CMD"/L), $primary_target)
+ test -f ${shellquote($recovery_directory)}/rbac/secrets/keys.json \
+ && cp -rp ${shellquote($recovery_directory)}/rbac/secrets/keys.json /etc/puppetlabs/console-services/conf.d/secrets/ \
+ || echo "secret ldap key doesn't exist"
+ | CMD
+# lint:ignore:140chars
+ # If restoring orchestrator, restore the secrets to /etc/puppetlabs/orchestration-services/conf.d/secrets/
+ if getvar('recovery_opts.orchestrator') {
+ out::message('# Restoring orchestrator secret keys')
+ run_command(@("CMD"/L), $primary_target)
+ cp -rp ${shellquote($recovery_directory)}/orchestrator/secrets/* /etc/puppetlabs/orchestration-services/conf.d/secrets/
+ | CMD
+ }
+# lint:endignore
+
+ #$database_to_restore.each |Integer $index, Boolean $value | {
+ $restore_databases.each |$name,$database_targets| {
+ out::message("# Restoring database pe-${name}")
+ $dbname = "pe-${shellquote($name)}"
+
+ # Drop pglogical extensions and schema if present
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ --tuples-only \
+ -d '${dbname}' \
+ -c 'DROP SCHEMA IF EXISTS pglogical CASCADE;'"
+ | CMD
+
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ -d '${dbname}' \
+ -c 'DROP SCHEMA public CASCADE; CREATE SCHEMA public;'"
+ | CMD
+
+ # To allow db user to restore the database grant temporary privileges
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ -d '${dbname}' \
+ -c 'ALTER USER \"${dbname}\" WITH SUPERUSER;'"
+ | CMD
+
+ # Restore database. If there are multiple database restore targets, perform
+ # the restore(s) in parallel.
+ parallelize($database_targets) |$database_target| {
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/server/bin/pg_restore \
+ -j 4 \
+ -d "sslmode=verify-ca \
+ host=${shellquote($database_target.peadm::certname())} \
+ sslcert=/etc/puppetlabs/puppetdb/ssl/${shellquote($primary_target.peadm::certname())}.cert.pem \
+ sslkey=/etc/puppetlabs/puppetdb/ssl/${shellquote($primary_target.peadm::certname())}.private_key.pem \
+ sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem \
+ dbname=${dbname} \
+ user=${dbname}" \
+ -Fd ${recovery_directory}/${name}/${dbname}.dump.d
+ | CMD
+ }
+
+ # Remove db user privileges post restore
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ -d '${dbname}' \
+ -c 'ALTER USER \"${dbname}\" WITH NOSUPERUSER;'"
+ | CMD
+
+ # Drop pglogical extension and schema (again) if present after db restore
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ --tuples-only \
+ -d '${dbname}' \
+ -c 'DROP SCHEMA IF EXISTS pglogical CASCADE;'"
+ | CMD
+
+ run_command(@("CMD"/L), $database_targets)
+ su - pe-postgres -s /bin/bash -c \
+ "/opt/puppetlabs/server/bin/psql \
+ -d '${dbname}' \
+ -c 'DROP EXTENSION IF EXISTS pglogical CASCADE;'"
+ | CMD
+ }
+
+ # Use `puppet infra` to ensure correct file permissions, restart services,
+ # etc. Make sure not to try and get config data from the classifier, which
+ # isn't yet up and running.
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-infrastructure configure --no-recover
+ | CMD
+
+ # If we have replicas, reinitialise them
+ run_command(@("CMD"/L), $replica_target)
+ /opt/puppetlabs/bin/puppet-infra reinitialize replica -y
+ | CMD
+
+ # Use PuppetDB's /pdb/admin/v1/archive API to MERGE previously saved data
+ # into the restored database.
+ # TODO: consider adding a heuristic to skip when inappropriate due to size
+ # or other factors.
+ if getvar('recovery_opts.puppetdb') and $restore_type == 'migration' {
+ run_command(@("CMD"/L), $primary_target)
+ /opt/puppetlabs/bin/puppet-db import \
+ --cert=$(/opt/puppetlabs/bin/puppet config print hostcert) \
+ --key=$(/opt/puppetlabs/bin/puppet config print hostprivkey) \
+ ${shellquote($recovery_directory)}/puppetdb-archive.bin
+ | CMD
+# lint:endignore
+ }
+
+# Run Puppet to pick up last remaining config tweaks
+ run_task('peadm::puppet_runonce', $primary_target)
+
+ if $restore_type == 'recovery-db' {
+ run_task('peadm::puppet_runonce', $puppetdb_postgresql_targets)
+ }
+
+ apply($primary_target) {
+ file { $recovery_directory :
+ ensure => 'absent',
+ force => true,
+ }
+ }
+
+ return('success')
+}
diff --git a/plans/restore_ca.pp b/plans/restore_ca.pp
index d5ff84da..82d0a961 100644
--- a/plans/restore_ca.pp
+++ b/plans/restore_ca.pp
@@ -1,13 +1,13 @@
plan peadm::restore_ca(
Peadm::SingleTargetSpec $target,
String $file_path,
- Optional[String] $recovery_directory = '/tmp/peadm_recovery',
+ Optional[String] $recovery_directory = '/tmp/peadm_recovery',
) {
out::message('# Restoring ca and ssl certificates')
# lint:ignore:strict_indent
run_command(@("CMD"/L), $target)
- /opt/puppetlabs/bin/puppet-backup restore \
+ /opt/puppetlabs/bin/puppet-backup restore \
--scope=certs \
--tempdir=${shellquote($recovery_directory)} \
--force \
diff --git a/plans/util/init_db_server.pp b/plans/util/init_db_server.pp
new file mode 100644
index 00000000..0b5cf2be
--- /dev/null
+++ b/plans/util/init_db_server.pp
@@ -0,0 +1,91 @@
+plan peadm::util::init_db_server(
+ String[1] $db_host,
+ Boolean $install_pe = false,
+ String[1] $pe_version = '2023.5.0',
+ String[1] $pe_platform = 'el-8-x86_64',
+) {
+ $t = get_targets('*')
+ wait_until_available($t)
+
+ $db_target = get_target($db_host)
+ parallelize($t + $db_target) |$target| {
+ $fqdn = run_command('hostname -f', $target)
+ $target.set_var('certname', $fqdn.first['stdout'].chomp)
+ }
+
+ $primary_target = $t.filter |$n| { $n.vars['role'] == 'primary' }[0]
+ $compiler_targets = $t.filter |$n| { $n.vars['role'] == 'compiler' }
+
+ out::message("db_target: ${db_target}")
+ out::message("db_target certname: ${db_target.peadm::certname()}")
+ out::message("primary_target: ${primary_target}")
+ out::message("compiler_targets: ${compiler_targets}")
+ run_command("/opt/puppetlabs/bin/puppetserver ca clean --certname ${db_target.peadm::certname()}", $primary_target)
+
+# lint:ignore:strict_indent
+ run_task('peadm::mkdir_p_file', $db_target,
+ path => '/etc/puppetlabs/puppet/puppet.conf',
+ content => @("HEREDOC"),
+ [main]
+ certname = ${db_target.peadm::certname()}
+ | HEREDOC
+# lint:endignore
+ )
+
+ run_plan('peadm::util::insert_csr_extension_requests', $db_target,
+ extension_requests => {
+ peadm::oid('peadm_role') => 'puppet/puppetdb-database',
+ peadm::oid('peadm_availability_group') => 'A',
+ })
+
+ $uploaddir = '/tmp'
+ $pe_tarball_name = "puppet-enterprise-${pe_version}-${pe_platform}.tar.gz"
+ $pe_tarball_source = "https://s3.amazonaws.com/pe-builds/released/${pe_version}/${pe_tarball_name}"
+ $upload_tarball_path = "${uploaddir}/${pe_tarball_name}"
+
+ run_task('peadm::download', $db_target,
+ source => $pe_tarball_source,
+ path => $upload_tarball_path,
+ )
+
+ run_command('systemctl stop pe-puppetdb', $compiler_targets, { _catch_errors => true })
+ # run_task('service', $primary_target, { action => 'restart', name => 'pe-puppetdb', _catch_errors => true })
+
+ if $install_pe {
+ $pe_conf_data = {}
+
+ $puppetdb_database_temp_config = {
+ 'puppet_enterprise::profile::database::puppetdb_hosts' => (
+ $compiler_targets + $primary_target
+ ).map |$t| { $t.peadm::certname() },
+ }
+
+ $primary_postgresql_pe_conf = peadm::generate_pe_conf({
+ 'console_admin_password' => 'not used',
+ 'puppet_enterprise::puppet_master_host' => $primary_target.peadm::certname(),
+ 'puppet_enterprise::database_host' => $db_target.peadm::certname(),
+ } + $puppetdb_database_temp_config + $pe_conf_data)
+
+ # Upload the pe.conf files to the hosts that need them, and ensure their
+ # certnames are configured correctly. Right now we do that by staging a
+ # puppet.conf file on each host.
+
+ peadm::file_content_upload($primary_postgresql_pe_conf, '/tmp/pe.conf', $db_target)
+
+ # Run the PE installer on the puppetdb database hosts
+ run_task('peadm::pe_install', $db_target,
+ tarball => $upload_tarball_path,
+ peconf => '/tmp/pe.conf',
+ puppet_service_ensure => 'stopped',
+ )
+ }
+
+ run_plan('peadm::subplans::component_install', $db_target, {
+ primary_host => $primary_target,
+ avail_group_letter => 'A',
+ role => 'puppet/puppetdb-database',
+ })
+ run_task('peadm::puppet_runonce', $compiler_targets)
+ run_command('systemctl start pe-puppetdb', $compiler_targets, { _catch_errors => true })
+ run_task('service', $compiler_targets, { action => 'restart', name => 'pe-puppetserver', _catch_errors => true })
+}
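
The plan assumes the inventory targets carry a role var (primary, compiler) and bootstraps a fresh external PostgreSQL node. A typical invocation from the test harness might look like this (hostname is hypothetical; paths follow the repo's fixture layout):

    bolt plan run peadm::util::init_db_server \
      db_host=pe-psql-1.example.com \
      install_pe=true \
      pe_version=2023.5.0 \
      pe_platform=el-8-x86_64 \
      --inventoryfile spec/fixtures/litmus_inventory.yaml \
      --modulepath spec/fixtures/modules
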
diff --git a/plans/util/sanitize_pg_pe_conf.pp b/plans/util/sanitize_pg_pe_conf.pp
index 5888bd09..c3033bd0 100644
--- a/plans/util/sanitize_pg_pe_conf.pp
+++ b/plans/util/sanitize_pg_pe_conf.pp
@@ -11,11 +11,15 @@
run_task('peadm::read_file', $targets,
path => $path,
).map |$result| {
- $sanitized = $result['content'].loadjson() + {
+ out::message("Sanitizing pe.conf on ${result.value}")
+ $sanitized = $result.value['content'].parsejson() + {
'puppet_enterprise::puppet_master_host' => $primary_target.peadm::certname(),
- 'puppet_enterprise::database_host' => $result.target.peadm::certname(),
+ 'puppet_enterprise::puppetdb_database_host' => $result.target.peadm::certname(),
}
+
+ out::message("Sanitized ${sanitized}")
+
# Return the result of file_content_upload. There is only one target
- peadm::file_content_upload($sanitized, $path, $result.target)[0]
+ peadm::file_content_upload(stdlib::to_json_pretty($sanitized), $path, $result.target)[0]
}
}
diff --git a/spec/acceptance/peadm_spec/plans/add_inventory_hostnames.pp b/spec/acceptance/peadm_spec/plans/add_inventory_hostnames.pp
new file mode 100644
index 00000000..89914ac6
--- /dev/null
+++ b/spec/acceptance/peadm_spec/plans/add_inventory_hostnames.pp
@@ -0,0 +1,13 @@
+plan peadm_spec::add_inventory_hostnames(
+ String[1] $inventory_file
+) {
+ $t = get_targets('*')
+ wait_until_available($t)
+
+ parallelize($t) |$target| {
+ $fqdn = run_command('hostname -f', $target)
+ $target.set_var('certname', $fqdn.first['stdout'].chomp)
+ $command = "yq eval '(.groups[].targets[] | select(.uri == \"${target.uri}\").name) = \"${target.vars['certname']}\"' -i ${inventory_file}"
+ run_command($command, 'localhost')
+ }
+}
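
The yq expression matches each target by uri and writes its certname into the name field, so later plans can address nodes by certname. The intended effect on a single inventory entry, with hypothetical values:

    # Before:  - uri: 10.0.0.5
    # After:   - uri: 10.0.0.5
    #            name: pe-compiler-0.example.com
    yq eval '(.groups[].targets[] | select(.uri == "10.0.0.5").name) = "pe-compiler-0.example.com"' \
      -i spec/fixtures/litmus_inventory.yaml
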
diff --git a/spec/acceptance/peadm_spec/plans/puppet_run_test.pp b/spec/acceptance/peadm_spec/plans/puppet_run_test.pp
new file mode 100644
index 00000000..ceca6329
--- /dev/null
+++ b/spec/acceptance/peadm_spec/plans/puppet_run_test.pp
@@ -0,0 +1,20 @@
+plan peadm_spec::puppet_run_test() {
+ $t = get_targets('*')
+ wait_until_available($t)
+
+ parallelize($t) |$target| {
+ $fqdn = run_command('hostname -f', $target)
+ $target.set_var('certname', $fqdn.first['stdout'].chomp)
+ $cert = $target.vars['certname']
+
+ out::message("Running puppet on host ${cert}.")
+
+ $status = run_task('peadm::puppet_runonce', $target).first.status
+
+ # Check for success based on the task result status
+ if $status == 'success' {
+ out::message("Puppet run succeeded on ${cert}.")
+ } else {
+ fail_plan("Puppet run failed on ${cert}.")
+ }
+ }
+}
diff --git a/spec/acceptance/peadm_spec/plans/test_backup.pp b/spec/acceptance/peadm_spec/plans/test_backup.pp
new file mode 100644
index 00000000..110b21cf
--- /dev/null
+++ b/spec/acceptance/peadm_spec/plans/test_backup.pp
@@ -0,0 +1,23 @@
+plan peadm_spec::test_backup() {
+ $t = get_targets('*')
+ wait_until_available($t)
+
+ parallelize($t) |$target| {
+ $fqdn = run_command('hostname -f', $target)
+ $target.set_var('certname', $fqdn.first['stdout'].chomp)
+ }
+
+ # run infra status on the primary
+ $primary_host = $t.filter |$n| { $n.vars['role'] == 'primary' }[0]
+ out::message("Running peadm::status on primary host ${primary_host}")
+ $result = run_plan('peadm::status', $primary_host, { 'format' => 'json' })
+
+ out::message($result)
+
+ if empty($result['failed']) {
+ out::message('Cluster is healthy, continuing')
+ } else {
+ fail_plan('Cluster is not healthy, aborting')
+ }
+ run_plan('peadm::backup', $primary_host, { 'output_directory' => '/tmp', 'backup_type' => 'recovery' })
+}
diff --git a/spec/acceptance/peadm_spec/plans/test_restore.pp b/spec/acceptance/peadm_spec/plans/test_restore.pp
new file mode 100644
index 00000000..d09e84d8
--- /dev/null
+++ b/spec/acceptance/peadm_spec/plans/test_restore.pp
@@ -0,0 +1,46 @@
+# TODO: parameterize the plan so it can do:
+# - a recovery restore of the primary server
+# - a recovery restore of the primary db server
+plan peadm_spec::test_restore(
+ # restore type determines the restore options
+ Enum['recovery', 'recovery-db'] $restore_type = 'recovery',
+) {
+ $t = get_targets('*')
+ wait_until_available($t)
+
+ parallelize($t) |$target| {
+ $fqdn = run_command('hostname -f', $target)
+ $certname = $fqdn.first['stdout'].chomp
+ $target.set_var('certname', $certname)
+ }
+
+ $targets_with_name = $t.map |$target| {
+ Target.new({
+ 'uri' => $target.uri,
+ 'name' => $target.vars['certname'],
+ 'config' => $target.config,
+ 'vars' => $target.vars,
+ })
+ }
+
+ $primary_host = $targets_with_name.filter |$n| { $n.vars['role'] == 'primary' }[0]
+
+ # get the latest backup file, if more than one exists
+ $result = run_command('ls -t /tmp/pe-backup*gz | head -1', $primary_host).first.value
+ $input_file = strip(getvar('result.stdout'))
+
+ run_plan('peadm::restore', $primary_host, { 'restore_type' => $restore_type, 'input_file' => $input_file })
+
+ # run infra status on the primary
+ out::message("Running peadm::status on primary host ${primary_host}")
+ $status = run_plan('peadm::status', $primary_host, { 'format' => 'json' })
+
+ out::message($status)
+
+ if empty($status['failed']) {
+ out::message('Cluster is healthy, continuing')
+ } else {
+ fail_plan('Cluster is not healthy, aborting')
+ }
+}
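
Like the other peadm_spec acceptance plans, this one is meant to be driven by the workflow; run by hand it would look roughly like this (inventory and module paths per the repo's conventions):

    bundle exec bolt plan run peadm_spec::test_restore \
      restore_type=recovery-db \
      --inventoryfile spec/fixtures/litmus_inventory.yaml \
      --modulepath spec/fixtures/modules
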
diff --git a/spec/fixtures/peadm_config.json b/spec/fixtures/peadm_config.json
new file mode 100644
index 00000000..cbd5db76
--- /dev/null
+++ b/spec/fixtures/peadm_config.json
@@ -0,0 +1,3 @@
+{
+ "params": { "primary_host": "primary", "primary_postgresql_host": "postgres" }
+}
diff --git a/spec/plans/backup_spec.rb b/spec/plans/backup_spec.rb
new file mode 100644
index 00000000..d6b2ff75
--- /dev/null
+++ b/spec/plans/backup_spec.rb
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+# rubocop:disable Layout/LineLength
+
+require 'spec_helper'
+
+describe 'peadm::backup' do
+ include BoltSpec::Plans
+ let(:default_params) { { 'targets' => 'primary', 'backup_type' => 'recovery' } }
+ let(:classifier_only) do
+ {
+ 'targets' => 'primary',
+ 'backup_type' => 'custom',
+ 'backup' => {
+ 'activity' => false,
+ 'ca' => false,
+ 'classifier' => true,
+ 'code' => false,
+ 'config' => false,
+ 'orchestrator' => false,
+ 'puppetdb' => false,
+ 'rbac' => false,
+ }
+ }
+ end
+ let(:all_backup_options) do
+ {
+ 'targets' => 'primary',
+ 'backup_type' => 'custom',
+ 'backup' => {} # set all to true
+ }
+ end
+
+ before(:each) do
+ # define a zero timestamp
+ mocktime = Puppet::Pops::Time::Timestamp.new(0)
+ # mock the timestamp to always return the zero timestamp
+ # so the directory name is always the same - /tmp/pe-backup-1970-01-01T000000Z
+ allow(Puppet::Pops::Time::Timestamp).to receive(:now).and_return(mocktime)
+
+ allow_apply
+
+ expect_task('peadm::get_peadm_config').always_return({
+ 'params' => {
+ 'primary_host' => 'primary',
+ 'primary_postgresql_host' => 'postgres',
+ }
+ })
+ end
+
+ it 'runs with backup type recovery' do
+ expect_out_message.with_params('# Backing up ca, certs, code and config for recovery')
+ expect_out_message.with_params('# Backing up database pe-puppetdb')
+
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/tmp/pe-backup-1970-01-01T000000Z/recovery --scope=certs,code,config\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/puppetdb/pe-puppetdb.dump.d "sslmode=verify-ca host=postgres user=pe-puppetdb sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-puppetdb"' + "\n")
+ expect_command('umask 0077 && cd /tmp && tar -czf /tmp/pe-backup-1970-01-01T000000Z.tar.gz pe-backup-1970-01-01T000000Z && rm -rf /tmp/pe-backup-1970-01-01T000000Z' + "\n")
+
+ expect(run_plan('peadm::backup', default_params)).to be_ok
+ end
+
+ it 'runs with backup type recovery by default' do
+ expect_out_message.with_params('# Backing up ca, certs, code and config for recovery')
+ expect_out_message.with_params('# Backing up database pe-puppetdb')
+
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/tmp/pe-backup-1970-01-01T000000Z/recovery --scope=certs,code,config\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/puppetdb/pe-puppetdb.dump.d "sslmode=verify-ca host=postgres user=pe-puppetdb sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-puppetdb"' + "\n")
+ expect_command('umask 0077 && cd /tmp && tar -czf /tmp/pe-backup-1970-01-01T000000Z.tar.gz pe-backup-1970-01-01T000000Z && rm -rf /tmp/pe-backup-1970-01-01T000000Z' + "\n")
+
+ expect(run_plan('peadm::backup', { 'targets' => 'primary' })).to be_ok
+ end
+
+ it 'runs with backup and defined output folder' do
+ expect_out_message.with_params('# Backing up ca, certs, code and config for recovery')
+ expect_out_message.with_params('# Backing up database pe-puppetdb')
+
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/user/home/folder/pe-backup-1970-01-01T000000Z/recovery --scope=certs,code,config\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /user/home/folder/pe-backup-1970-01-01T000000Z/puppetdb/pe-puppetdb.dump.d "sslmode=verify-ca host=postgres user=pe-puppetdb sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-puppetdb"' + "\n")
+ expect_command('umask 0077 && cd /user/home/folder && tar -czf /user/home/folder/pe-backup-1970-01-01T000000Z.tar.gz pe-backup-1970-01-01T000000Z && rm -rf /user/home/folder/pe-backup-1970-01-01T000000Z' + "\n")
+
+ expect(run_plan('peadm::backup', { 'targets' => 'primary', 'output_directory' => '/user/home/folder' })).to be_ok
+ end
+
+ it 'runs with backup type custom, classifier only' do
+ expect_task('peadm::backup_classification').with_params({ 'directory' => '/tmp/pe-backup-1970-01-01T000000Z/classifier' })
+ expect_out_message.with_params('# Backing up classification')
+ expect_command('umask 0077 && cd /tmp && tar -czf /tmp/pe-backup-1970-01-01T000000Z.tar.gz pe-backup-1970-01-01T000000Z && rm -rf /tmp/pe-backup-1970-01-01T000000Z' + "\n")
+
+ expect(run_plan('peadm::backup', classifier_only)).to be_ok
+ end
+
+ it 'runs with backup type custom, all backup params set to true' do
+ expect_task('peadm::backup_classification').with_params({ 'directory' => '/tmp/pe-backup-1970-01-01T000000Z/classifier' })
+
+ expect_out_message.with_params('# Backing up classification')
+ expect_out_message.with_params('# Backing up database pe-orchestrator')
+ expect_out_message.with_params('# Backing up database pe-activity')
+ expect_out_message.with_params('# Backing up database pe-rbac')
+ expect_out_message.with_params('# Backing up database pe-puppetdb')
+
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/tmp/pe-backup-1970-01-01T000000Z/ca --scope=certs\n")
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/tmp/pe-backup-1970-01-01T000000Z/code --scope=code\n")
+ expect_command('chown pe-postgres /tmp/pe-backup-1970-01-01T000000Z/config')
+ expect_command("/opt/puppetlabs/bin/puppet-backup create --dir=/tmp/pe-backup-1970-01-01T000000Z/config --scope=config\n")
+ expect_command("test -f /etc/puppetlabs/console-services/conf.d/secrets/keys.json && cp -rp /etc/puppetlabs/console-services/conf.d/secrets /tmp/pe-backup-1970-01-01T000000Z/rbac/ || echo secret ldap key doesnt exist\n")
+ expect_command("cp -rp /etc/puppetlabs/orchestration-services/conf.d/secrets /tmp/pe-backup-1970-01-01T000000Z/orchestrator/\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/orchestrator/pe-orchestrator.dump.d "sslmode=verify-ca host=primary user=pe-orchestrator sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-orchestrator"' + "\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/activity/pe-activity.dump.d "sslmode=verify-ca host=primary user=pe-activity sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-activity"' + "\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/rbac/pe-rbac.dump.d "sslmode=verify-ca host=primary user=pe-rbac sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-rbac"' + "\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_dump -Fd -Z3 -j4 -f /tmp/pe-backup-1970-01-01T000000Z/puppetdb/pe-puppetdb.dump.d "sslmode=verify-ca host=postgres user=pe-puppetdb sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-puppetdb"' + "\n")
+ expect_command('umask 0077 && cd /tmp && tar -czf /tmp/pe-backup-1970-01-01T000000Z.tar.gz pe-backup-1970-01-01T000000Z && rm -rf /tmp/pe-backup-1970-01-01T000000Z' + "\n")
+
+ expect(run_plan('peadm::backup', all_backup_options)).to be_ok
+ end
+end
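
These specs stub all commands, tasks, and messages through BoltSpec, so they run without provisioning any targets; locally, something like the following should exercise them (assuming fixtures are prepared first):

    bundle exec rake spec_prep
    bundle exec rspec spec/plans/backup_spec.rb spec/plans/restore_spec.rb
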
diff --git a/spec/plans/restore_spec.rb b/spec/plans/restore_spec.rb
new file mode 100644
index 00000000..52754b8e
--- /dev/null
+++ b/spec/plans/restore_spec.rb
@@ -0,0 +1,124 @@
+# frozen_string_literal: true
+
+# rubocop:disable Layout/LineLength
+
+require 'spec_helper'
+
+describe 'peadm::restore' do
+ include BoltSpec::Plans
+
+ backup_dir = '/input/file'
+ backup_tarball = "#{backup_dir}.tar.gz"
+
+ let(:recovery_params) do
+ {
+ 'targets' => 'primary',
+ 'input_file' => backup_tarball,
+ 'restore_type' => 'recovery'
+ }
+ end
+ let(:recovery_db_params) do
+ {
+ 'targets' => 'primary',
+ 'input_file' => backup_tarball,
+ 'restore_type' => 'recovery-db'
+ }
+ end
+ let(:classifier_only_params) do
+ {
+ 'targets' => 'primary',
+ 'input_file' => backup_tarball,
+ 'restore_type' => 'custom',
+ 'restore' => {
+ 'activity' => false,
+ 'ca' => false,
+ 'classifier' => true,
+ 'code' => false,
+ 'config' => false,
+ 'orchestrator' => false,
+ 'puppetdb' => false,
+ 'rbac' => false,
+ }
+ }
+ end
+
+ let(:cluster) { { 'params' => { 'primary_host' => 'primary', 'primary_postgresql_host' => 'postgres' } } }
+
+ before(:each) do
+ allow_apply
+
+ expect_out_message.with_params('cluster: ' + cluster.to_s.delete('"').gsub(%r{=>}, ' => '))
+ expect_out_message.with_params('# Restoring ldap secret key if it exists')
+ allow_task('peadm::puppet_runonce')
+ end
+
+ # only run for tests that have the :valid_cluster tag
+ before(:each, valid_cluster: true) do
+ expect_task('peadm::get_peadm_config').always_return(cluster)
+ end
+
+ it 'runs with recovery params', valid_cluster: true do
+ expect_out_message.with_params('# Restoring database pe-puppetdb')
+ expect_out_message.with_params('# Restoring ca, certs, code and config for recovery')
+
+ expect_command("umask 0077 && cd /input && tar -xzf /input/file.tar.gz\n")
+ expect_command("/opt/puppetlabs/bin/puppet-backup restore --scope=certs,code,config --tempdir=/input/file --force /input/file/recovery/pe_backup-*tgz\n")
+ expect_command("systemctl stop pe-console-services pe-nginx pxp-agent pe-puppetserver pe-orchestration-services puppet pe-puppetdb\n")
+ expect_command("test -f /input/file/rbac/keys.json && cp -rp /input/file/keys.json /etc/puppetlabs/console-services/conf.d/secrets/ || echo secret ldap key doesnt exist\n")
+ expect_command("su - pe-postgres -s /bin/bash -c \"/opt/puppetlabs/server/bin/psql --tuples-only -d 'pe-puppetdb' -c 'DROP SCHEMA IF EXISTS pglogical CASCADE;'\"\n").be_called_times(2)
+ expect_command("su - pe-postgres -s /bin/bash -c \"/opt/puppetlabs/server/bin/psql -d 'pe-puppetdb' -c 'DROP SCHEMA public CASCADE; CREATE SCHEMA public;'\"\n")
+ expect_command('su - pe-postgres -s /bin/bash -c "/opt/puppetlabs/server/bin/psql -d \'pe-puppetdb\' -c \'ALTER USER \\"pe-puppetdb\\" WITH SUPERUSER;\'"' + "\n")
+ expect_command('/opt/puppetlabs/server/bin/pg_restore -j 4 -d "sslmode=verify-ca host=postgres sslcert=/etc/puppetlabs/puppetdb/ssl/primary.cert.pem sslkey=/etc/puppetlabs/puppetdb/ssl/primary.private_key.pem sslrootcert=/etc/puppetlabs/puppet/ssl/certs/ca.pem dbname=pe-puppetdb user=pe-puppetdb" -Fd /input/file/puppetdb/pe-puppetdb.dump.d' + "\n")
+ expect_command('su - pe-postgres -s /bin/bash -c "/opt/puppetlabs/server/bin/psql -d \'pe-puppetdb\' -c \'ALTER USER \\"pe-puppetdb\\" WITH NOSUPERUSER;\'"' + "\n")
+ expect_command('su - pe-postgres -s /bin/bash -c "/opt/puppetlabs/server/bin/psql -d \'pe-puppetdb\' -c \'DROP EXTENSION IF EXISTS pglogical CASCADE;\'"' + "\n")
+ expect_command("/opt/puppetlabs/bin/puppet-infrastructure configure --no-recover\n")
+
+ expect(run_plan('peadm::restore', recovery_params)).to be_ok
+ end
+
+ it 'runs with default recovery', valid_cluster: true do
+ allow_any_command
+
+ expect_out_message.with_params('# Restoring database pe-puppetdb')
+ expect_out_message.with_params('# Restoring ca, certs, code and config for recovery')
+
+ expect(run_plan('peadm::restore', { 'targets' => 'primary', 'input_file' => backup_tarball })).to be_ok
+ end
+
+ it 'runs with recovery-db params', valid_cluster: true do
+ allow_any_command
+
+ expect_out_message.with_params('# Restoring primary database for recovery')
+ expect_out_message.with_params('# Restoring database pe-puppetdb')
+
+ expect(run_plan('peadm::restore', recovery_db_params)).to be_ok
+ end
+
+ it 'runs with classifier-only params', valid_cluster: true do
+ allow_any_command
+
+ expect_task('peadm::restore_classification').with_params({
+ 'classification_file' => "#{backup_dir}/classifier/classification_backup.json"
+ })
+
+ expect(run_plan('peadm::restore', classifier_only_params)).to be_ok
+ end
+
+ it 'runs with recovery params, no valid cluster', valid_cluster: false do
+ allow_any_command
+
+ # simulate a failure to get the cluster configuration
+ expect_task('peadm::get_peadm_config').always_return({})
+ expect_out_message.with_params('Failed to get cluster configuration, loading from backup...')
+
+ # download mocked to return the path to the file fixtures/peadm_config.json
+ expect_download("#{backup_dir}/peadm/peadm_config.json").return do |targets, _source, _destination, _params|
+ results = targets.map do |target|
+ Bolt::Result.new(target, value: { 'path' => File.expand_path(File.join(fixtures, 'peadm_config.json')) })
+ end
+
+ Bolt::ResultSet.new(results)
+ end
+ expect(run_plan('peadm::restore', recovery_params)).to be_ok
+ end
+end
diff --git a/spec/plans/util/sanitize_pg_pe_conf_spec.rb b/spec/plans/util/sanitize_pg_pe_conf_spec.rb
index 25384656..38b5521e 100644
--- a/spec/plans/util/sanitize_pg_pe_conf_spec.rb
+++ b/spec/plans/util/sanitize_pg_pe_conf_spec.rb
@@ -1,4 +1,4 @@
-# spec/spec_helper.rb
+require 'spec_helper'
describe 'peadm::util::sanitize_pg_pe_conf ' do
# Include the BoltSpec library functions
@@ -9,6 +9,7 @@
end
it 'Runs' do
+ allow_any_out_message
# 1) peadm::util::sanitize_pg_pe_conf Runs
# Failure/Error: expect(run_plan('peadm::util::sanitize_pg_pe_conf', 'targets' => 'foo,bar', 'primary_host' => 'pe-server-d8b317-0.us-west1-a.c.davidsand.internal')).to be_ok
# expected `#>, @status="failure">.ok?` to be truthy, got false
diff --git a/tasks/get_peadm_config.rb b/tasks/get_peadm_config.rb
index 5fc3dc56..30d8ad21 100755
--- a/tasks/get_peadm_config.rb
+++ b/tasks/get_peadm_config.rb
@@ -11,7 +11,13 @@ class GetPEAdmConfig
def initialize(params); end
def execute!
- puts config.to_json
+ # If there is no 'PE HA Replica' node group, this is not a peadm-configured cluster.
+ replica_group = groups.data.find { |obj| obj['name'] == 'PE HA Replica' }
+ if replica_group
+ puts config.to_json
+ else
+ puts({ 'error' => 'This is not a peadm-compatible cluster. Use peadm::convert first.' }.to_json)
+ end
end
def config
diff --git a/tasks/reinstall_pe.json b/tasks/reinstall_pe.json
new file mode 100644
index 00000000..2fc81e23
--- /dev/null
+++ b/tasks/reinstall_pe.json
@@ -0,0 +1,19 @@
+{
+ "description": "Reinstall PE, only to be used to restore PE",
+ "parameters": {
+ "version": {
+ "type": "String[1]",
+ "description": "The PE version to install"
+ },
+ "arch": {
+ "type": "String[1]",
+ "description": "The PE installation platform",
+ "default": "el-8-x86_64"
+ },
+ "uninstall": {
+ "type": "Boolean",
+ "description": "Whether we want to uninstall PE before installing",
+ "default": false
+ }
+ }
+}
diff --git a/tasks/reinstall_pe.sh b/tasks/reinstall_pe.sh
new file mode 100644
index 00000000..a7d8a4da
--- /dev/null
+++ b/tasks/reinstall_pe.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+# This task reinstalls PE and needs to run as root.
+
+# Uninstall PE if installed
+if [[ "$PT_uninstall" == true ]]; then
+ /opt/puppetlabs/bin/puppet-enterprise-uninstaller -p -d -y || true
+fi
+
+# Download PE
+INSTALLER="puppet-enterprise-${PT_version}-${PT_arch}"
+curl -fO "https://s3.amazonaws.com/pe-builds/released/${PT_version}/${INSTALLER}.tar.gz"
+tar xf "${INSTALLER}.tar.gz"
+
+# Install PE. We need to pass "y" through stdin since the flag -y requires pe.conf to be present.
+cd "$INSTALLER"
+echo 'y' | ./puppet-enterprise-installer
\ No newline at end of file
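
Bolt passes task parameters to shell tasks as PT_-prefixed environment variables, so a hypothetical invocation against a node being rebuilt would be:

    bolt task run peadm::reinstall_pe \
      version=2023.5.0 uninstall=true \
      --targets pe-primary.example.com --run-as root
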
diff --git a/tasks/transform_classification_groups.json b/tasks/transform_classification_groups.json
index 4284d04f..3bb9c203 100644
--- a/tasks/transform_classification_groups.json
+++ b/tasks/transform_classification_groups.json
@@ -1,17 +1,15 @@
{
- "description": "Transform the user groups from a source backup to a list of groups on the target server",
- "parameters": {
- "source_directory": {
- "type": "String",
- "description": "Location of Source node group yaml file"
- },
- "working_directory": {
- "type": "String",
- "description": "Location of target node group yaml file and where to create the transformed file"
- }
+ "description": "Transform the user groups from a source backup to a list of groups on the target server",
+ "parameters": {
+ "source_directory": {
+ "type": "String",
+ "description": "Location of Source node group yaml file"
},
- "input_method": "stdin",
- "implementations": [
- {"name": "transform_classification_groups.py"}
- ]
- }
\ No newline at end of file
+ "working_directory": {
+ "type": "String",
+ "description": "Location of target node group yaml file and where to create the transformed file"
+ }
+ },
+ "input_method": "stdin",
+ "implementations": [{ "name": "transform_classification_groups.rb" }]
+}
diff --git a/tasks/transform_classification_groups.rb b/tasks/transform_classification_groups.rb
new file mode 100755
index 00000000..eeb031e4
--- /dev/null
+++ b/tasks/transform_classification_groups.rb
@@ -0,0 +1,64 @@
+#!/opt/puppetlabs/puppet/bin/ruby
+# frozen_string_literal: true
+
+# rubocop:disable Naming/VariableName
+
+# This script takes the classification output from a source and a target Puppet
+# infrastructure, extracts the user-defined node groups from the source, and merges
+# them into the infrastructure node groups of the target. This makes it possible to
+# restore a backup of user node definitions.
+
+require 'json'
+
+# Parse JSON from stdin
+params = JSON.parse(STDIN.read)
+source_classification_file = "#{params['source_directory']}/classification_backup.json"
+target_classification_file = "#{params['working_directory']}/classification_backup.json"
+transformed_classification_file = "#{params['working_directory']}/transformed_classification.json"
+
+# Function to remove subgroups
+def removesubgroups(data_rsg, id_rsg)
+ groups = data_rsg.select { |x| x['parent'] == id_rsg }
+ groups.each do |group|
+ subid = group['id']
+ data_rsg.reject! { |x| x['id'] == subid }
+ data_rsg = removesubgroups(data_rsg, subid)
+ end
+ data_rsg
+end
+
+# Function to add subgroups
+def addsubgroups(data_asg, id_asg, peinf_asg)
+ groups = data_asg.select { |x| x['parent'] == id_asg }
+ peinf_asg += groups
+ groups.each do |group|
+ subid = group['id']
+ peinf_asg = addsubgroups(data_asg, subid, peinf_asg)
+ end
+ peinf_asg
+end
+
+# Read the backup classification
+data = JSON.parse(File.read(source_classification_file))
+
+# Read the DR server classification
+data_DR = JSON.parse(File.read(target_classification_file))
+
+# Find the infrastructure group and its ID
+peinf = data.select { |x| x['name'] == 'PE Infrastructure' }
+group_id = peinf[0]['id']
+
+# Remove this group from the list and recursively remove all subgroups
+data.reject! { |x| x['id'] == group_id }
+data = removesubgroups(data, group_id)
+
+# Find the DR infrastructure group and its ID
+peinf_DR = data_DR.select { |x| x['name'] == 'PE Infrastructure' }
+id_DR = peinf_DR[0]['id']
+
+# Recursively go through inf groups to get the full tree
+peinf_DR = addsubgroups(data_DR, id_DR, peinf_DR)
+
+# Add the contents of the backup classification without PE inf to the DR PE inf groups
+# and write to a file
+peinf_transformed_groups = data + peinf_DR
+File.open(transformed_classification_file, 'w') { |file| file.write(JSON.pretty_generate(peinf_transformed_groups)) }
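
Because the task metadata declares "input_method": "stdin", parameters arrive as a single JSON document on standard input; a standalone smoke test of the script could therefore look like this (directories are hypothetical and must each contain a classification_backup.json):

    echo '{"source_directory":"/tmp/source","working_directory":"/tmp/target"}' \
      | ./tasks/transform_classification_groups.rb
    # writes /tmp/target/transformed_classification.json
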
diff --git a/types/recovery_opts.pp b/types/recovery_opts.pp
index c759e955..eb392752 100644
--- a/types/recovery_opts.pp
+++ b/types/recovery_opts.pp
@@ -1,8 +1,10 @@
type Peadm::Recovery_opts = Struct[{
+ 'activity' => Optional[Boolean],
+ 'ca' => Optional[Boolean],
+ 'classifier' => Optional[Boolean],
+ 'code' => Optional[Boolean],
+ 'config' => Optional[Boolean],
'orchestrator' => Optional[Boolean],
'puppetdb' => Optional[Boolean],
'rbac' => Optional[Boolean],
- 'activity' => Optional[Boolean],
- 'ca' => Optional[Boolean[false]],
- 'classifier' => Optional[Boolean],
}]