diff --git a/api/v1beta2/mysqlcluster_types.go b/api/v1beta2/mysqlcluster_types.go
index 3474b1e16..d4315638e 100644
--- a/api/v1beta2/mysqlcluster_types.go
+++ b/api/v1beta2/mysqlcluster_types.go
@@ -122,6 +122,10 @@ type MySQLClusterSpec struct {
 	// During container init moco-agent will set mysql admin interface is bound to localhost. The moco-agent will also
 	// communicate with mysqld over localhost when acting as a sidecar.
 	AgentUseLocalhost bool `json:"agentUseLocalhost,omitempty"`
+
+	// Offline sets the cluster offline, releasing compute resources. Data is not removed.
+	// +optional
+	Offline bool `json:"offline,omitempty"`
 }
 
 func (s MySQLClusterSpec) validateCreate() (admission.Warnings, field.ErrorList) {
diff --git a/backup/backup.go b/backup/backup.go
index bb497a5cd..fdfef7fdc 100644
--- a/backup/backup.go
+++ b/backup/backup.go
@@ -102,6 +102,10 @@ func (bm *BackupManager) Backup(ctx context.Context) error {
 		return fmt.Errorf("failed to get pod list: %w", err)
 	}
 
+	if bm.cluster.Spec.Offline {
+		return fmt.Errorf("cluster %s/%s is configured to be offline", bm.cluster.Namespace, bm.cluster.Name)
+	}
+
 	if len(pods.Items) != int(bm.cluster.Spec.Replicas) {
 		return fmt.Errorf("too few Pods for %s/%s", bm.cluster.Namespace, bm.cluster.Name)
 	}
diff --git a/charts/moco/templates/generated/crds/moco_crds.yaml b/charts/moco/templates/generated/crds/moco_crds.yaml
index a33dad9d0..1ab4e5a4b 100644
--- a/charts/moco/templates/generated/crds/moco_crds.yaml
+++ b/charts/moco/templates/generated/crds/moco_crds.yaml
@@ -2244,6 +2244,9 @@ spec:
               description: 'MySQLConfigMapName is a `ConfigMap` name of MySQL '
               nullable: true
               type: string
+            offline:
+              description: Offline sets the cluster offline, releasing comput
+              type: boolean
             podTemplate:
               description: PodTemplate is a `Pod` template for MySQL server c
               properties:
diff --git a/clustering/process.go b/clustering/process.go
index c965791ac..054713f81 100644
--- a/clustering/process.go
+++ b/clustering/process.go
@@ -187,6 +187,8 @@ func (p *managerProcess) do(ctx context.Context) (bool, error) {
 	logFromContext(ctx).Info("cluster state is " + ss.State.String())
 	switch ss.State {
+	case StateOffline:
+		return false, nil
 	case StateCloning:
 		if p.isCloning(ctx, ss) {
 			return false, nil
 		}
@@ -281,6 +283,7 @@ func (p *managerProcess) updateStatus(ctx context.Context, ss *StatusSet) error {
 	case StateFailed:
 	case StateLost:
 	case StateIncomplete:
+	case StateOffline:
 	}
 
 	meta.SetStatusCondition(&cluster.Status.Conditions, updateCond(mocov1beta2.ConditionInitialized, initialized))
@@ -309,7 +312,7 @@ func (p *managerProcess) updateStatus(ctx context.Context, ss *StatusSet) error {
 
 	var syncedReplicas int
 	for _, pod := range ss.Pods {
-		if isPodReady(pod) {
+		if pod != nil && isPodReady(pod) {
 			syncedReplicas++
 		}
 	}
diff --git a/clustering/status.go b/clustering/status.go
index 15322753a..5146a8144 100644
--- a/clustering/status.go
+++ b/clustering/status.go
@@ -50,6 +50,7 @@ const (
 	StateDegraded
 	StateFailed
 	StateLost
+	StateOffline
 )
 
 // String returns a unique string for each ClusterState.
@@ -71,6 +72,8 @@ func (s ClusterState) String() string {
 		return "Failed"
 	case StateLost:
 		return "Lost"
+	case StateOffline:
+		return "Offline"
 	}
 
 	panic(int(s))
@@ -107,6 +110,8 @@ func (ss *StatusSet) Close() {
 // It may also set `ss.NeedSwitch` and `ss.Candidate` for switchover.
 func (ss *StatusSet) DecideState() {
 	switch {
+	case isOffline(ss):
+		ss.State = StateOffline
 	case isCloning(ss):
 		ss.State = StateCloning
 	case isRestoring(ss):
@@ -160,7 +165,7 @@ func (p *managerProcess) GatherStatus(ctx context.Context) (*StatusSet, error) {
 		return nil, fmt.Errorf("failed to list Pods: %w", err)
 	}
 
-	if int(cluster.Spec.Replicas) != len(pods.Items) {
+	if !cluster.Spec.Offline && int(cluster.Spec.Replicas) != len(pods.Items) {
 		return nil, fmt.Errorf("too few pods; only %d pods exist", len(pods.Items))
 	}
 	ss.Pods = make([]*corev1.Pod, cluster.Spec.Replicas)
@@ -547,6 +552,10 @@ func isLost(ss *StatusSet) bool {
 	return okReplicas <= (int(ss.Cluster.Spec.Replicas) / 2)
 }
 
+func isOffline(ss *StatusSet) bool {
+	return ss.Cluster.Spec.Offline
+}
+
 func needSwitch(pod *corev1.Pod) bool {
 	if pod.DeletionTimestamp != nil {
 		return true
diff --git a/cmd/kubectl-moco/cmd/switchover.go b/cmd/kubectl-moco/cmd/switchover.go
index 49bed93eb..78bb63d73 100644
--- a/cmd/kubectl-moco/cmd/switchover.go
+++ b/cmd/kubectl-moco/cmd/switchover.go
@@ -30,6 +30,10 @@ func switchover(ctx context.Context, name string) error {
 		return err
 	}
 
+	if cluster.Spec.Offline {
+		return errors.New("offline cluster is not able to switch")
+	}
+
 	if cluster.Spec.Replicas == 1 {
 		return errors.New("single-instance cluster is not able to switch")
 	}
diff --git a/config/crd/bases/moco.cybozu.com_mysqlclusters.yaml b/config/crd/bases/moco.cybozu.com_mysqlclusters.yaml
index 8c4f5558e..1f91f55ac 100644
--- a/config/crd/bases/moco.cybozu.com_mysqlclusters.yaml
+++ b/config/crd/bases/moco.cybozu.com_mysqlclusters.yaml
@@ -82,6 +82,9 @@ spec:
               description: 'MySQLConfigMapName is a `ConfigMap` name of MySQL '
               nullable: true
               type: string
+            offline:
+              description: Offline sets the cluster offline, releasing comput
+              type: boolean
             podTemplate:
               description: PodTemplate is a `Pod` template for MySQL server c
               properties:
diff --git a/config/crd/tests/apiextensions.k8s.io_v1_customresourcedefinition_mysqlclusters.moco.cybozu.com.yaml b/config/crd/tests/apiextensions.k8s.io_v1_customresourcedefinition_mysqlclusters.moco.cybozu.com.yaml
index ae5e5d31f..3351a2738 100644
--- a/config/crd/tests/apiextensions.k8s.io_v1_customresourcedefinition_mysqlclusters.moco.cybozu.com.yaml
+++ b/config/crd/tests/apiextensions.k8s.io_v1_customresourcedefinition_mysqlclusters.moco.cybozu.com.yaml
@@ -82,6 +82,9 @@ spec:
               description: 'MySQLConfigMapName is a `ConfigMap` name of MySQL '
               nullable: true
               type: string
+            offline:
+              description: Offline sets the cluster offline, releasing comput
+              type: boolean
             podTemplate:
               description: PodTemplate is a `Pod` template for MySQL server c
               properties:
diff --git a/controllers/mysqlcluster_controller.go b/controllers/mysqlcluster_controller.go
index 04665525a..590dfc8d8 100644
--- a/controllers/mysqlcluster_controller.go
+++ b/controllers/mysqlcluster_controller.go
@@ -705,10 +705,14 @@ func (r *MySQLClusterReconciler) reconcileV1StatefulSet(ctx context.Context, req
 		return fmt.Errorf("failed to get StatefulSet %s/%s: %w", cluster.Namespace, cluster.PrefixedName(), err)
 	}
 
+	replicas := cluster.Spec.Replicas
+	if cluster.Spec.Offline {
+		replicas = 0
+	}
 	sts := appsv1ac.StatefulSet(cluster.PrefixedName(), cluster.Namespace).
 		WithLabels(labelSet(cluster, false)).
 		WithSpec(appsv1ac.StatefulSetSpec().
-			WithReplicas(cluster.Spec.Replicas).
+			WithReplicas(replicas).
 			WithSelector(metav1ac.LabelSelector().
 				WithMatchLabels(labelSet(cluster, false))).
 			WithPodManagementPolicy(appsv1.ParallelPodManagement).
@@ -972,7 +976,7 @@ func (r *MySQLClusterReconciler) reconcileV1PDB(ctx context.Context, req ctrl.Re
 	pdb.Namespace = cluster.Namespace
 	pdb.Name = cluster.PrefixedName()
 
-	if cluster.Spec.Replicas < 3 {
+	if cluster.Spec.Offline || cluster.Spec.Replicas < 3 {
 		err := r.Delete(ctx, pdb)
 		if err == nil {
 			log.Info("removed pod disruption budget")
diff --git a/controllers/mysqlcluster_controller_test.go b/controllers/mysqlcluster_controller_test.go
index c49a04f30..d23fd4985 100644
--- a/controllers/mysqlcluster_controller_test.go
+++ b/controllers/mysqlcluster_controller_test.go
@@ -1717,6 +1717,45 @@ var _ = Describe("MySQLCluster reconciler", func() {
 		}).Should(Succeed())
 	})
 
+	It("should scale down statefulset when offline", func() {
+		cluster := testNewMySQLCluster("test")
+		err := k8sClient.Create(ctx, cluster)
+		Expect(err).NotTo(HaveOccurred())
+
+		Eventually(func() error {
+			sts := &appsv1.StatefulSet{}
+			if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: "test", Name: "moco-test"}, sts); err != nil {
+				return err
+			}
+			if sts.Spec.Replicas == nil || *sts.Spec.Replicas != cluster.Spec.Replicas {
+				return fmt.Errorf("replica count should match cluster")
+			}
+			return nil
+		}).Should(Succeed())
+
+		By("setting cluster offline")
+		Eventually(func() error {
+			cluster2 := &mocov1beta2.MySQLCluster{}
+			if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster2); err != nil {
+				return err
+			}
+			cluster2.Spec.Offline = true
+			return k8sClient.Update(ctx, cluster2)
+		}).Should(Succeed())
+
+		By("checking statefulset is scaled down")
+		Eventually(func() error {
+			sts := &appsv1.StatefulSet{}
+			if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: "test", Name: "moco-test"}, sts); err != nil {
+				return err
+			}
+			if sts.Spec.Replicas == nil || *sts.Spec.Replicas != 0 {
+				return fmt.Errorf("replica count should be 0 for offline cluster")
+			}
+			return nil
+		}).Should(Succeed())
+	})
+
 	It("should sets ConditionStatefulSetReady to be true when StatefulSet is ready", func() {
 		cluster := testNewMySQLCluster("test")
 		err := k8sClient.Create(ctx, cluster)
@@ -1754,7 +1793,7 @@ var _ = Describe("MySQLCluster reconciler", func() {
 				return fmt.Errorf("condition does not exists")
 			}
 			if conditionStatefulSetReady.Status != metav1.ConditionTrue {
-				return fmt.Errorf("condition is not false")
+				return fmt.Errorf("condition is not true")
 			}
 			return nil
 		}).Should(Succeed())
diff --git a/docs/crd_mysqlcluster_v1beta2.md b/docs/crd_mysqlcluster_v1beta2.md
index a17e254ce..0647fdaa8 100644
--- a/docs/crd_mysqlcluster_v1beta2.md
+++ b/docs/crd_mysqlcluster_v1beta2.md
@@ -84,6 +84,7 @@ MySQLClusterSpec defines the desired state of MySQLCluster
 | restore | Restore is the specification to perform Point-in-Time-Recovery from existing cluster. If this field is not null, MOCO restores the data as specified and create a new cluster with the data. This field is not editable. | *[RestoreSpec](#restorespec) | false |
 | disableSlowQueryLogContainer | DisableSlowQueryLogContainer controls whether to add a sidecar container named \"slow-log\" to output slow logs as the containers output. If set to true, the sidecar container is not added. The default is false. | bool | false |
 | agentUseLocalhost | AgentUseLocalhost configures the mysqld interface to bind and be accessed over localhost instead of pod name. During container init moco-agent will set mysql admin interface is bound to localhost. The moco-agent will also communicate with mysqld over localhost when acting as a sidecar. | bool | false |
+| offline | Offline sets the cluster offline, releasing compute resources. Data is not removed. | bool | false |
 
 [Back to Custom Resources](#custom-resources)
diff --git a/e2e/backup_with_env_test.go b/e2e/backup_with_env_test.go
index fed78b200..1c94c319a 100644
--- a/e2e/backup_with_env_test.go
+++ b/e2e/backup_with_env_test.go
@@ -138,14 +138,6 @@ var _ = Context("backup with ObjectBucketName is set in environments variables",
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "backup", "mysqlclusters", "--all")
-
-		Eventually(func(g Gomega) {
-			out, err := kubectl(nil, "get", "-n", "backup", "pod", "-o", "json")
-			g.Expect(err).NotTo(HaveOccurred())
-			pods := &corev1.PodList{}
-			err = json.Unmarshal(out, pods)
-			g.Expect(err).NotTo(HaveOccurred())
-			g.Expect(len(pods.Items)).To(BeNumerically(">", 0), "wait until all Pods are deleted")
-		}).Should(Succeed())
+		verifyAllPodsDeleted("backup")
 	})
 })
diff --git a/e2e/failover_test.go b/e2e/failover_test.go
index c91051f47..689083627 100644
--- a/e2e/failover_test.go
+++ b/e2e/failover_test.go
@@ -2,14 +2,12 @@ package e2e
 
 import (
 	_ "embed"
-	"encoding/json"
 	"errors"
 	"fmt"
 
 	mocov1beta2 "github.com/cybozu-go/moco/api/v1beta2"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -138,20 +136,6 @@ var _ = Context("failure", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "failover", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "failover", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("failover")
 	})
 })
diff --git a/e2e/failure_test.go b/e2e/failure_test.go
index 3f3e4d019..cd5c94383 100644
--- a/e2e/failure_test.go
+++ b/e2e/failure_test.go
@@ -3,7 +3,6 @@ package e2e
 import (
 	"context"
 	_ "embed"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"sync"
@@ -12,7 +11,6 @@ import (
 	mocov1beta2 "github.com/cybozu-go/moco/api/v1beta2"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -98,20 +96,6 @@ var _ = Context("failure", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "failure", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "failure", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("failure")
 	})
 })
diff --git a/e2e/lifecycle_test.go b/e2e/lifecycle_test.go
index 41986dcd7..dd87434ec 100644
--- a/e2e/lifecycle_test.go
+++ b/e2e/lifecycle_test.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	_ "embed"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -284,20 +283,6 @@ var _ = Context("lifecycle", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "foo", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "foo", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("foo")
 	})
 })
diff --git a/e2e/offline_test.go b/e2e/offline_test.go
new file mode 100644
index 000000000..2e54fe4f7
--- /dev/null
+++ b/e2e/offline_test.go
@@ -0,0 +1,104 @@
+package e2e
+
+import (
+	_ "embed"
+	"errors"
+	"fmt"
+	mocov1beta2 "github.com/cybozu-go/moco/api/v1beta2"
+	"github.com/cybozu-go/moco/clustering"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"strconv"
+	"strings"
+)
+
+//go:embed testdata/offline_test.yaml
+var offlineYAML string
+
+//go:embed testdata/offline_test_changed.yaml
+var offlineChangedYAML string
+
+var _ = Context("offline", func() {
+	if doUpgrade {
+		return
+	}
+
+	It("should construct a cluster", func() {
+		kubectlSafe(fillTemplate(offlineYAML), "apply", "-f", "-")
+		Eventually(func() error {
+			cluster, err := getCluster("offline", "test")
+			if err != nil {
+				return err
+			}
+			for _, cond := range cluster.Status.Conditions {
+				if cond.Type != mocov1beta2.ConditionHealthy {
+					continue
+				}
+				if cond.Status == metav1.ConditionTrue {
+					return nil
+				}
+				return fmt.Errorf("cluster is not healthy: %s", cond.Status)
+			}
+			return errors.New("no health condition")
+		}).Should(Succeed())
+
+		kubectlSafe(nil, "moco", "-n", "offline", "mysql", "-u", "moco-writable", "test", "--",
+			"-e", "CREATE DATABASE test")
+		kubectlSafe(nil, "moco", "-n", "offline", "mysql", "-u", "moco-writable", "test", "--",
+			"-D", "test", "-e", "CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT, data VARCHAR(32) NOT NULL, PRIMARY KEY (id), KEY key1 (data), KEY key2 (data, id)) ENGINE=InnoDB")
+		kubectlSafe(nil, "moco", "-n", "offline", "mysql", "-u", "moco-writable", "test", "--",
+			"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('aaa')")
+	})
+
+	It("should take the cluster offline", func() {
+		kubectlSafe(fillTemplate(offlineChangedYAML), "apply", "-f", "-")
+		Eventually(func() error {
+			cluster, err := getCluster("offline", "test")
+			if err != nil {
+				return err
+			}
+			for _, cond := range cluster.Status.Conditions {
+				if cond.Type != mocov1beta2.ConditionHealthy {
+					continue
+				}
+				if cond.Status == metav1.ConditionFalse && cond.Reason == clustering.StateOffline.String() {
+					return nil
+				}
+				return fmt.Errorf("cluster is still healthy: %s", cond.Status)
+			}
+			return errors.New("no health condition")
+		}).Should(Succeed())
+		verifyAllPodsDeleted("offline")
+	})
+
+	It("should bring the cluster back online", func() {
+		kubectlSafe(fillTemplate(offlineYAML), "apply", "-f", "-")
+		Eventually(func() error {
+			cluster, err := getCluster("offline", "test")
+			if err != nil {
+				return err
+			}
+			for _, cond := range cluster.Status.Conditions {
+				if cond.Type != mocov1beta2.ConditionHealthy {
+					continue
+				}
+				if cond.Status == metav1.ConditionTrue {
+					return nil
+				}
+				return fmt.Errorf("cluster is not healthy: %s", cond.Status)
+			}
+			return errors.New("no health condition")
+		}).Should(Succeed())
+		out := kubectlSafe(nil, "moco", "-n", "offline", "mysql", "test", "--",
+			"-N", "-D", "test", "-e", "SELECT COUNT(*) FROM t")
+		count, err := strconv.Atoi(strings.TrimSpace(string(out)))
+		Expect(err).NotTo(HaveOccurred())
+		Expect(count).To(Equal(1))
+	})
+
+	It("should delete clusters", func() {
+		kubectlSafe(nil, "delete", "-n", "offline", "mysqlclusters", "--all")
+		verifyAllPodsDeleted("offline")
+	})
+})
diff --git a/e2e/pvc_test.go b/e2e/pvc_test.go
index f2461176d..dae21a077 100644
--- a/e2e/pvc_test.go
+++ b/e2e/pvc_test.go
@@ -184,20 +184,6 @@ var _ = Context("pvc_test", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "pvc", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "pvc", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("pvc")
 	})
 })
diff --git a/e2e/replication_test.go b/e2e/replication_test.go
index 796a7d035..06c5e231b 100644
--- a/e2e/replication_test.go
+++ b/e2e/replication_test.go
@@ -347,35 +347,8 @@ var _ = Context("replication", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "donor", "mysqlclusters", "--all")
 		kubectlSafe(nil, "delete", "-n", "repl", "mysqlclusters", "--all")
+		verifyAllPodsDeleted("donor")
+		verifyAllPodsDeleted("repl")
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "donor", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "repl", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
 	})
 })
diff --git a/e2e/run_test.go b/e2e/run_test.go
index b7a625ce5..974f79df8 100644
--- a/e2e/run_test.go
+++ b/e2e/run_test.go
@@ -220,3 +220,21 @@ func verifyPVCSize(ns string, clusterName string) {
 		return nil
 	}).Should(Succeed())
 }
+
+// verifyAllPodsDeleted waits until the namespace has no Pods.
+func verifyAllPodsDeleted(namespace string) {
+	Eventually(func() error {
+		out, err := kubectl(nil, "get", "-n", namespace, "pod", "-o", "json")
+		if err != nil {
+			return err
+		}
+		pods := &corev1.PodList{}
+		if err := json.Unmarshal(out, pods); err != nil {
+			return err
+		}
+		if len(pods.Items) > 0 {
+			return errors.New("wait until all Pods are deleted")
+		}
+		return nil
+	}).Should(Succeed())
+}
diff --git a/e2e/stop_test.go b/e2e/stop_test.go
index d393f3bf8..849c0eba2 100644
--- a/e2e/stop_test.go
+++ b/e2e/stop_test.go
@@ -342,20 +342,6 @@ var _ = Context("stop reconciliation and clustering", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "stop", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "stop", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("stop")
 	})
 })
diff --git a/e2e/testdata/offline_test.yaml b/e2e/testdata/offline_test.yaml
new file mode 100644
index 000000000..60490e4cd
--- /dev/null
+++ b/e2e/testdata/offline_test.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: offline
+---
+apiVersion: moco.cybozu.com/v1beta2
+kind: MySQLCluster
+metadata:
+  namespace: offline
+  name: test
+spec:
+  offline: false
+  replicas: 1
+  podTemplate:
+    spec:
+      containers:
+        - name: mysqld
+          image: ghcr.io/cybozu-go/moco/mysql:{{ . }}
+  volumeClaimTemplates:
+    - metadata:
+        name: mysql-data
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 1Gi
diff --git a/e2e/testdata/offline_test_changed.yaml b/e2e/testdata/offline_test_changed.yaml
new file mode 100644
index 000000000..4547a7f90
--- /dev/null
+++ b/e2e/testdata/offline_test_changed.yaml
@@ -0,0 +1,21 @@
+apiVersion: moco.cybozu.com/v1beta2
+kind: MySQLCluster
+metadata:
+  namespace: offline
+  name: test
+spec:
+  offline: true
+  replicas: 1
+  podTemplate:
+    spec:
+      containers:
+        - name: mysqld
+          image: ghcr.io/cybozu-go/moco/mysql:{{ . }}
+  volumeClaimTemplates:
+    - metadata:
+        name: mysql-data
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 1Gi
diff --git a/e2e/upgrade_test.go b/e2e/upgrade_test.go
index 454859677..1b7e2e7d5 100644
--- a/e2e/upgrade_test.go
+++ b/e2e/upgrade_test.go
@@ -168,20 +168,6 @@ var _ = Context("upgrade", func() {
 
 	It("should delete clusters", func() {
 		kubectlSafe(nil, "delete", "-n", "upgrade", "mysqlclusters", "--all")
-
-		Eventually(func() error {
-			out, err := kubectl(nil, "get", "-n", "upgrade", "pod", "-o", "json")
-			if err != nil {
-				return err
-			}
-			pods := &corev1.PodList{}
-			if err := json.Unmarshal(out, pods); err != nil {
-				return err
-			}
-			if len(pods.Items) > 0 {
-				return errors.New("wait until all Pods are deleted")
-			}
-			return nil
-		}).Should(Succeed())
+		verifyAllPodsDeleted("upgrade")
 	})
 })