
Commit

test
Signed-off-by: Satoru Takeuchi <satoru.takeuchi@gmail.com>
satoru-takeuchi committed May 1, 2024
1 parent 6d3f86c commit c17aa32
Showing 3 changed files with 29 additions and 269 deletions.
274 changes: 6 additions & 268 deletions .github/workflows/go-test.yaml
@@ -17,7 +17,6 @@ jobs:
runs-on: ubuntu-20.04
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
steps:
- name: checkout
uses: actions/checkout@v4
with:
@@ -52,111 +51,18 @@ jobs:
set -ex
kubectl rook-ceph ceph status
- name: Ceph daemon
run: |
set -ex
kubectl rook-ceph ceph daemon mon.a dump_historic_ops
- name: Ceph status using context
run: |
set -ex
kubectl rook-ceph --context=$(kubectl config current-context) ceph status
- name: Rados df using context
run: |
set -ex
kubectl rook-ceph --context=$(kubectl config current-context) rados df
- name: Mon restore
run: |
set -ex
# test mon restore: restore quorum to mon a, delete mons b and c, then add mons d and e
kubectl rook-ceph mons restore-quorum a
kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
tests/github-action-helper.sh wait_for_three_mons rook-ceph
kubectl -n rook-ceph wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
kubectl -n rook-ceph wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
- name: RBD command
run: |
set -ex
kubectl rook-ceph rbd ls replicapool
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph subvolume ls
kubectl rook-ceph subvolume ls --stale
kubectl rook-ceph subvolume delete myfs test-subvol group-a
tests/github-action-helper.sh create_sc_with_retain_policy
tests/github-action-helper.sh create_stale_subvolume
subVol=$(kubectl rook-ceph subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph subvolume delete myfs "$subVol"
- name: Get mon endpoints
run: |
set -ex
kubectl rook-ceph mons
- name: Update operator configmap
run: |
set -ex
kubectl rook-ceph operator set ROOK_LOG_LEVEL DEBUG
- name: Print cr status
run: |
set -ex
kubectl rook-ceph rook version
kubectl rook-ceph rook status
kubectl rook-ceph rook status all
kubectl rook-ceph rook status cephobjectstores
- name: Restart operator pod
run: |
set -ex
kubectl rook-ceph operator restart
# wait for the operator pod to be restarted
POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
kubectl -n rook-ceph wait --for=delete pod/$POD --timeout=100s
tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_default
- name: Maintenance Mode
run: |
set -ex
kubectl rook-ceph maintenance start rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance rook-ceph
kubectl rook-ceph maintenance stop rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 rook-ceph
- name: Purge Osd
run: |
set -ex
kubectl -n rook-ceph scale deployment rook-ceph-osd-0 --replicas 0
kubectl rook-ceph rook purge-osd 0 --force
- name: Restore CRD without CRName
run: |
# First, delete the CephCluster
kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph -n rook-ceph restore-deleted cephclusters
tests/github-action-helper.sh wait_for_crd_to_be_ready_default
- name: Restore CRD with CRName
run: |
# First, delete the CephCluster
kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph -n rook-ceph restore-deleted cephclusters my-cluster
tests/github-action-helper.sh wait_for_crd_to_be_ready_default
- name: Show Cluster State
run: |
set -ex
kubectl -n rook-ceph get all
- name: Flatten a PVC clone
run: |
set -ex
tests/github-action-helper.sh install_external_snapshotter
tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
# flatten copies the parent image's data into the clone so it no longer depends on the parent
kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
- name: Destroy Cluster (removing CRs)
env:
@@ -178,171 +84,3 @@ jobs:
uses: mxschmitt/action-tmate@v3
with:
limit-access-to-actor: false

custom-namespace:
runs-on: ubuntu-20.04
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: setup golang
uses: ./.github/workflows/set-up-go

- name: consider debugging
uses: ./.github/workflows/tmate_debug
with:
use-tmate: ${{ secrets.USE_TMATE }}

- name: setup cluster
uses: ./.github/workflows/cluster-setup
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
op-ns: "test-operator"
cluster-ns: "test-cluster"

- name: build the binary and run unit tests
run: |
make build
sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
make test
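# Note: kubectl discovers plugins by searching PATH for executables named
# kubectl-<name>, and a dash in the invocation may match an underscore in the
# file name. Installing the binary as kubectl-rook_ceph above is therefore what
# lets the steps below invoke it as "kubectl rook-ceph"; running
# "kubectl plugin list" would confirm the plugin is discoverable.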
- name: Cluster Health
run: |
set -e
kubectl rook-ceph --operator-namespace test-operator -n test-cluster health
- name: Ceph status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph status
- name: Ceph daemon
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph daemon osd.0 dump_historic_ops
- name: Rados df
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rados df
- name: Ceph status using context
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster --context=$(kubectl config current-context) ceph status
- name: Mon restore
run: |
set -ex
# test mon restore: restore quorum to mon a, delete mons b and c, then add mons d and e
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons restore-quorum a
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
tests/github-action-helper.sh wait_for_three_mons test-cluster
kubectl -n test-cluster wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
kubectl -n test-cluster wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
- name: RBD command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rbd ls replicapool
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
tests/github-action-helper.sh create_sc_with_retain_policy_custom_ns test-operator test-cluster
tests/github-action-helper.sh create_stale_subvolume
subVol=$(kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs "$subVol"
- name: Get mon endpoints
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons
- name: Update operator configmap
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator set ROOK_LOG_LEVEL DEBUG
- name: Print cr status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook version
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status all
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status cephobjectstores
- name: Restart operator pod
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator restart
# wait for the operator pod to be restarted
POD=$(kubectl -n test-operator get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
kubectl -n test-operator wait --for=delete pod/$POD --timeout=100s
tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_custom
- name: Maintenance Mode
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance start rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance test-cluster
kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance stop rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 test-cluster
- name: Purge Osd
run: |
set -ex
kubectl -n test-cluster scale deployment rook-ceph-osd-0 --replicas 0
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook purge-osd 0 --force
- name: Restore CRD without CRName
run: |
# First, delete the CephCluster
kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters
tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
- name: Restore CRD with CRName
run: |
# First, delete the CephCluster
kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters my-cluster
tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
- name: Show Cluster State
run: |
set -ex
kubectl -n test-cluster get all
- name: Destroy Cluster (removing CRs)
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster destroy-cluster
sleep 1
kubectl get deployments -n test-cluster --no-headers | wc -l | (read n && [ "$n" -le 1 ] || { echo "the CRs could not be deleted"; kubectl get all -n test-cluster; exit 1; })
- name: collect common logs
if: always()
uses: ./.github/workflows/collect-logs
with:
name: go-test-custom-namespace

- name: consider debugging
if: failure()
uses: mxschmitt/action-tmate@v3
with:
limit-access-to-actor: false
2 changes: 1 addition & 1 deletion docs/flatten-rbd-pvc.md
@@ -8,5 +8,5 @@ By flattening RBD images, we can bypass the problems specific to non-flattened c
## Examples

```bash
-kubectl rook-ceph flatten-rbd-pvc rbd-pvc
+kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
```
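As a quick sanity check after flattening, the clone's RBD image should no longer report a parent. This is a hedged sketch: the pool name `replicapool` matches the CI steps above, but the image name is a placeholder that must be looked up first.

```bash
# List the images in the pool, then inspect the one backing rbd-pvc-clone.
kubectl rook-ceph rbd ls replicapool
# After a successful flatten, the "parent:" line should be absent from the
# output; substitute the csi-vol-... name returned by the listing above.
kubectl rook-ceph rbd info replicapool/csi-vol-<id>
```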
22 changes: 22 additions & 0 deletions tests/github-action-helper.sh
@@ -267,6 +267,28 @@ install_minikube_with_none_driver() {
sudo -E minikube start --kubernetes-version="$1" --driver=none --memory 6g --cpus=2 --addons ingress --cni=calico
}

install_external_snapshotter() {
curl -L https://github.com/kubernetes-csi/external-snapshotter/archive/refs/tags/v6.2.0.zip -o external-snapshotter.zip
unzip external-snapshotter.zip
cd external-snapshotter-6.2.0

kubectl kustomize client/config/crd | kubectl create -f -
kubectl kustomize deploy/kubernetes/snapshot-controller | kubectl -n kube-system create -f -
}
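# A readiness check could follow the install above; the v6.2.0 manifests deploy
# a Deployment into kube-system whose name is assumed here from the upstream
# defaults, so a hedged sketch of such a check would be:
#   kubectl -n kube-system rollout status deployment snapshot-controller --timeout=90s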

wait_for_rbd_pvc_clone_to_be_bound() {
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/pvc-clone.yaml

timeout 100 bash <<-'EOF'
until [ "$(kubectl get pvc rbd-pvc-clone -o jsonpath='{.status.phase}')" = "Bound" ]; do
echo "waiting for the pvc clone to be in bound state"
sleep 1
done
EOF
timeout_command_exit_code
}
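# timeout_command_exit_code is defined elsewhere in this helper script; a
# minimal, hypothetical sketch (not the actual definition) would check the
# exit status of the preceding "timeout" invocation:
#   timeout_command_exit_code() {
#     if [ $? -eq 124 ]; then   # "timeout" exits with 124 on expiry
#       echo "timeout reached"
#       exit 1
#     fi
#   }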

########
# MAIN #
########
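For context, the pvc-clone.yaml consumed by wait_for_rbd_pvc_clone_to_be_bound requests a CSI clone of an existing PVC via spec.dataSource. A representative sketch follows; the storage class, source PVC name, and size are assumptions drawn from Rook's examples, not a copy of the upstream file:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-clone
spec:
  storageClassName: rook-ceph-block   # assumed: Rook's example RBD storage class
  dataSource:
    kind: PersistentVolumeClaim
    name: rbd-pvc                     # assumed: the source PVC being cloned
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi                    # must be at least the source PVC's size
```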
