From 2c2b97cbe2a89f2eb1acde28951473e50c65dd19 Mon Sep 17 00:00:00 2001 From: pingcap-github-bot Date: Wed, 27 May 2020 13:20:10 +0800 Subject: [PATCH] remove records of deleted pods from failure stores automatically and add an e2e test (#2560) (#2565) --- pkg/manager/member/tiflash_failover.go | 29 ++++- pkg/manager/member/tiflash_member_manager.go | 5 + pkg/manager/member/tikv_failover.go | 28 ++++- pkg/manager/member/tikv_failover_test.go | 110 +++++++++-------- pkg/manager/member/tikv_member_manager.go | 5 + pkg/util/tidbcluster/tidbcluster.go | 6 + tests/e2e/tidbcluster/stability.go | 118 +++++++++++++++++++ tests/e2e/util/tidbcluster/tidbcluster.go | 71 +++++++++++ 8 files changed, 311 insertions(+), 61 deletions(-) create mode 100644 tests/e2e/util/tidbcluster/tidbcluster.go diff --git a/pkg/manager/member/tiflash_failover.go b/pkg/manager/member/tiflash_failover.go index 569f9af7bb..7fb0cfad04 100644 --- a/pkg/manager/member/tiflash_failover.go +++ b/pkg/manager/member/tiflash_failover.go @@ -18,12 +18,14 @@ import ( "time" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/klog" ) +// TODO reuse tikvFailover since we share the same logic type tiflashFailover struct { tiflashFailoverPeriod time.Duration recorder record.EventRecorder @@ -34,6 +36,16 @@ func NewTiFlashFailover(tiflashFailoverPeriod time.Duration, recorder record.Eve return &tiflashFailover{tiflashFailoverPeriod, recorder} } +func (tff *tiflashFailover) isPodDesired(tc *v1alpha1.TidbCluster, podName string) bool { + ordinals := tc.TiFlashStsDesiredOrdinals(true) + ordinal, err := util.GetOrdinalFromPodName(podName) + if err != nil { + klog.Errorf("unexpected pod name %q: %v", podName, err) + return false + } + return ordinals.Has(ordinal) +} + func (tff *tiflashFailover) Failover(tc *v1alpha1.TidbCluster) error { ns := tc.GetNamespace() tcName := tc.GetName() @@ -43,6 +55,12 @@ func (tff *tiflashFailover) Failover(tc *v1alpha1.TidbCluster) error { if store.LastTransitionTime.IsZero() { continue } + if !tff.isPodDesired(tc, podName) { + // we should ignore the store record of deleted pod, otherwise the + // record of deleted pod may be added back to failure stores + // (before it enters into Offline/Tombstone state) + continue + } deadline := store.LastTransitionTime.Add(tff.tiflashFailoverPeriod) exist := false for _, failureStore := range tc.Status.TiFlash.FailureStores { @@ -74,8 +92,15 @@ func (tff *tiflashFailover) Failover(tc *v1alpha1.TidbCluster) error { return nil } -func (tff *tiflashFailover) Recover(_ *v1alpha1.TidbCluster) { - // Do nothing now +func (tff *tiflashFailover) Recover(tc *v1alpha1.TidbCluster) { + for key, failureStore := range tc.Status.TiFlash.FailureStores { + if !tff.isPodDesired(tc, failureStore.PodName) { + // If we delete the pods, e.g. by using advanced statefulset delete + // slots feature. We should remove the record of undesired pods, + // otherwise an extra replacement pod will be created. 
+ delete(tc.Status.TiFlash.FailureStores, key) + } + } } type fakeTiFlashFailover struct{} diff --git a/pkg/manager/member/tiflash_member_manager.go b/pkg/manager/member/tiflash_member_manager.go index 2948595e7b..46c7da786b 100644 --- a/pkg/manager/member/tiflash_member_manager.go +++ b/pkg/manager/member/tiflash_member_manager.go @@ -205,6 +205,11 @@ func (tfmm *tiflashMemberManager) syncStatefulSet(tc *v1alpha1.TidbCluster) erro return err } + // Recover failed stores if any before generating desired statefulset + if len(tc.Status.TiFlash.FailureStores) > 0 { + tfmm.tiflashFailover.Recover(tc) + } + newSet, err := getNewStatefulSet(tc, cm) if err != nil { return err diff --git a/pkg/manager/member/tikv_failover.go b/pkg/manager/member/tikv_failover.go index 036659a471..559865856f 100644 --- a/pkg/manager/member/tikv_failover.go +++ b/pkg/manager/member/tikv_failover.go @@ -18,6 +18,7 @@ import ( "time" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" @@ -34,6 +35,16 @@ func NewTiKVFailover(tikvFailoverPeriod time.Duration, recorder record.EventReco return &tikvFailover{tikvFailoverPeriod, recorder} } +func (tf *tikvFailover) isPodDesired(tc *v1alpha1.TidbCluster, podName string) bool { + ordinals := tc.TiKVStsDesiredOrdinals(true) + ordinal, err := util.GetOrdinalFromPodName(podName) + if err != nil { + klog.Errorf("unexpected pod name %q: %v", podName, err) + return false + } + return ordinals.Has(ordinal) +} + func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { ns := tc.GetNamespace() tcName := tc.GetName() @@ -43,6 +54,12 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { if store.LastTransitionTime.IsZero() { continue } + if !tf.isPodDesired(tc, podName) { + // we should ignore the store record of deleted pod, otherwise the + // record of deleted pod may be added back to failure stores + // (before it enters into Offline/Tombstone state) + continue + } deadline := store.LastTransitionTime.Add(tf.tikvFailoverPeriod) exist := false for _, failureStore := range tc.Status.TiKV.FailureStores { @@ -75,8 +92,15 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error { return nil } -func (tf *tikvFailover) Recover(_ *v1alpha1.TidbCluster) { - // Do nothing now +func (tf *tikvFailover) Recover(tc *v1alpha1.TidbCluster) { + for key, failureStore := range tc.Status.TiKV.FailureStores { + if !tf.isPodDesired(tc, failureStore.PodName) { + // If we delete the pods, e.g. by using advanced statefulset delete + // slots feature. We should remove the record of undesired pods, + // otherwise an extra replacement pod will be created. 
+ delete(tc.Status.TiKV.FailureStores, key) + } + } } type fakeTiKVFailover struct{} diff --git a/pkg/manager/member/tikv_failover_test.go b/pkg/manager/member/tikv_failover_test.go index 477d698a97..70f62c536b 100644 --- a/pkg/manager/member/tikv_failover_test.go +++ b/pkg/manager/member/tikv_failover_test.go @@ -25,31 +25,12 @@ import ( ) func TestTiKVFailoverFailover(t *testing.T) { - g := NewGomegaWithT(t) - - type testcase struct { + tests := []struct { name string update func(*v1alpha1.TidbCluster) err bool - expectFn func(*v1alpha1.TidbCluster) - } - testFn := func(test *testcase, t *testing.T) { - t.Log(test.name) - tc := newTidbClusterForPD() - tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(3) - test.update(tc) - tikvFailover := newFakeTiKVFailover() - - err := tikvFailover.Failover(tc) - if test.err { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).NotTo(HaveOccurred()) - } - test.expectFn(tc) - } - - tests := []testcase{ + expectFn func(t *testing.T, tc *v1alpha1.TidbCluster) + }{ { name: "normal", update: func(tc *v1alpha1.TidbCluster) { @@ -67,8 +48,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(2)) }, }, @@ -80,8 +61,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(0)) }, }, @@ -97,8 +78,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(0)) }, }, @@ -113,8 +94,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(0)) }, }, @@ -136,8 +117,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(1)) }, }, @@ -147,17 +128,17 @@ func TestTiKVFailoverFailover(t *testing.T) { tc.Status.TiKV.Stores = map[string]v1alpha1.TiKVStore{ "3": { State: v1alpha1.TiKVStateDown, - PodName: "tikv-3", + PodName: "tikv-0", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, - "10": { + "4": { State: v1alpha1.TiKVStateUp, - PodName: "tikv-10", + PodName: "tikv-4", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, - "11": { + "5": { State: v1alpha1.TiKVStateUp, - PodName: "tikv-11", + PodName: "tikv-5", LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, }, } @@ -173,8 +154,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc 
*v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(3)) }, }, @@ -187,14 +168,14 @@ func TestTiKVFailoverFailover(t *testing.T) { PodName: "tikv-3", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, - "10": { + "4": { State: v1alpha1.TiKVStateDown, - PodName: "tikv-10", + PodName: "tikv-4", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, - "11": { + "5": { State: v1alpha1.TiKVStateUp, - PodName: "tikv-11", + PodName: "tikv-5", LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, }, } @@ -210,8 +191,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(3)) }, }, @@ -219,19 +200,19 @@ func TestTiKVFailoverFailover(t *testing.T) { name: "exceed max failover count2", update: func(tc *v1alpha1.TidbCluster) { tc.Status.TiKV.Stores = map[string]v1alpha1.TiKVStore{ - "12": { + "0": { State: v1alpha1.TiKVStateDown, - PodName: "tikv-12", + PodName: "tikv-0", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, - "13": { + "4": { State: v1alpha1.TiKVStateDown, - PodName: "tikv-13", + PodName: "tikv-4", LastTransitionTime: metav1.Time{Time: time.Now().Add(-61 * time.Minute)}, }, - "14": { + "5": { State: v1alpha1.TiKVStateDown, - PodName: "tikv-14", + PodName: "tikv-5", LastTransitionTime: metav1.Time{Time: time.Now().Add(-70 * time.Minute)}, }, } @@ -251,8 +232,8 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(3)) }, }, @@ -293,14 +274,29 @@ func TestTiKVFailoverFailover(t *testing.T) { } }, err: false, - expectFn: func(tc *v1alpha1.TidbCluster) { - g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3)) + expectFn: func(t *testing.T, tc *v1alpha1.TidbCluster) { + g := NewGomegaWithT(t) g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(3)) }, }, } - for i := range tests { - testFn(&tests[i], t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + tc := newTidbClusterForPD() + tc.Spec.TiKV.Replicas = 6 + tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(3) + tt.update(tc) + tikvFailover := newFakeTiKVFailover() + + err := tikvFailover.Failover(tc) + if tt.err { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + tt.expectFn(t, tc) + }) } } diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 086435171a..b2ae9a6c92 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -201,6 +201,11 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl return err } + // Recover failed stores if any before generating desired statefulset + if len(tc.Status.TiKV.FailureStores) > 0 { + tkmm.tikvFailover.Recover(tc) + } + newSet, err := getNewTiKVSetForTidbCluster(tc, cm) if err != nil { return err diff --git a/pkg/util/tidbcluster/tidbcluster.go b/pkg/util/tidbcluster/tidbcluster.go index 1ac275c11e..0f52138fd4 100644 --- a/pkg/util/tidbcluster/tidbcluster.go +++ 
b/pkg/util/tidbcluster/tidbcluster.go @@ -85,3 +85,9 @@ func filterOutCondition(conditions []v1alpha1.TidbClusterCondition, condType v1a } return newConditions } + +// GetTidbClusterReadyCondition extracts the tidbcluster ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetTidbClusterReadyCondition(status v1alpha1.TidbClusterStatus) *v1alpha1.TidbClusterCondition { + return GetTidbClusterCondition(status, v1alpha1.TidbClusterReady) +} diff --git a/tests/e2e/tidbcluster/stability.go b/tests/e2e/tidbcluster/stability.go index 7aad2f35c6..416fe7315f 100644 --- a/tests/e2e/tidbcluster/stability.go +++ b/tests/e2e/tidbcluster/stability.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb-operator/tests/e2e/util/portforward" "github.com/pingcap/tidb-operator/tests/e2e/util/proxiedpdclient" utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb" + utiltidbcluster "github.com/pingcap/tidb-operator/tests/e2e/util/tidbcluster" utiltikv "github.com/pingcap/tidb-operator/tests/e2e/util/tikv" "github.com/pingcap/tidb-operator/tests/pkg/fixture" v1 "k8s.io/api/core/v1" @@ -858,4 +859,121 @@ var _ = ginkgo.Describe("[tidb-operator][Stability]", func() { }) + ginkgo.Context("[Feature: AdvancedStatefulSet][Feature: AutoFailover] operator with advanced statefulset and short auto-failover periods", func() { + var ocfg *tests.OperatorConfig + var oa tests.OperatorActions + var genericCli client.Client + failoverPeriod := time.Minute + + ginkgo.BeforeEach(func() { + ocfg = &tests.OperatorConfig{ + Namespace: ns, + ReleaseName: "operator", + Image: cfg.OperatorImage, + Tag: cfg.OperatorTag, + LogLevel: "4", + TestMode: true, + StringValues: map[string]string{ + "controllerManager.pdFailoverPeriod": failoverPeriod.String(), + "controllerManager.tidbFailoverPeriod": failoverPeriod.String(), + "controllerManager.tikvFailoverPeriod": failoverPeriod.String(), + "controllerManager.tiflashFailoverPeriod": failoverPeriod.String(), + }, + Features: []string{ + "AdvancedStatefulSet=true", + }, + } + oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f) + ginkgo.By("Installing CRDs") + oa.CleanCRDOrDie() + oa.InstallCRDOrDie(ocfg) + ginkgo.By("Installing tidb-operator") + oa.CleanOperatorOrDie(ocfg) + oa.DeployOperatorOrDie(ocfg) + var err error + genericCli, err = client.New(config, client.Options{Scheme: scheme.Scheme}) + framework.ExpectNoError(err, "failed to create clientset") + }) + + // https://github.com/pingcap/tidb-operator/issues/1464 + ginkgo.It("delete the failed pod via delete-slots feature of Advanced Statefulset after failover", func() { + ginkgo.By("Make sure we have at least 3 schedulable nodes") + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3)) + + clusterName := "failover" + tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version) + tc.Spec.SchedulerName = "" + tc.Spec.PD.Replicas = 1 + tc.Spec.PD.Config.Schedule = &v1alpha1.PDScheduleConfig{ + MaxStoreDownTime: pointer.StringPtr("1m"), + } + tc.Spec.TiDB.Replicas = 1 + tc.Spec.TiKV.Replicas = 3 + err := genericCli.Create(context.TODO(), tc) + framework.ExpectNoError(err) + + ginkgo.By("Waiting for the tidb cluster to become ready") + err = utiltidbcluster.WaitForTidbClusterReady(cli, tc.Namespace, tc.Name, time.Minute*30, 0) + framework.ExpectNoError(err) + + ginkgo.By("Fail a TiKV store") + podName := 
controller.TiKVMemberName(clusterName) + "-1" + f.ExecCommandInContainer(podName, "tikv", "sh", "-c", "rm -rf /var/lib/tikv/*") + + ginkgo.By("Waiting for the store to be put into failsure stores") + err = utiltidbcluster.WaitForTidbClusterCondition(cli, tc.Namespace, tc.Name, time.Minute*5, func(tc *v1alpha1.TidbCluster) (bool, error) { + exist := false + for _, failureStore := range tc.Status.TiKV.FailureStores { + if failureStore.PodName == podName { + exist = true + } + } + return exist, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("Waiting for the new pod to be created") + newPodName := controller.TiKVMemberName(clusterName) + "-3" + err = wait.PollImmediate(time.Second*10, 1*time.Minute, func() (bool, error) { + _, err := c.CoreV1().Pods(ns).Get(newPodName, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return false, nil + } + return !apierrors.IsNotFound(err), nil + }) + framework.ExpectNoError(err) + + ginkgo.By(fmt.Sprintf("Deleting the failed pod %q via delete-slots", podName)) + err = controller.GuaranteedUpdate(genericCli, tc, func() error { + if tc.Annotations == nil { + tc.Annotations = map[string]string{} + } + tc.Annotations[label.AnnTiKVDeleteSlots] = mustToString(sets.NewInt32(1)) + return nil + }) + framework.ExpectNoError(err) + + ginkgo.By(fmt.Sprintf("Waiting for the failed pod %q to be gone", podName)) + err = e2epod.WaitForPodNotFoundInNamespace(c, podName, ns, time.Minute*5) + framework.ExpectNoError(err) + + ginkgo.By(fmt.Sprintf("Waiting for the record of failed pod to be removed from failure stores")) + err = utiltidbcluster.WaitForTidbClusterCondition(cli, tc.Namespace, tc.Name, time.Minute*5, func(tc *v1alpha1.TidbCluster) (bool, error) { + exist := false + for _, failureStore := range tc.Status.TiKV.FailureStores { + if failureStore.PodName == podName { + exist = true + } + } + return !exist, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("Waiting for the tidb cluster to become ready") + err = utiltidbcluster.WaitForTidbClusterReady(cli, tc.Namespace, tc.Name, time.Minute*30, 0) + framework.ExpectNoError(err) + }) + }) + }) diff --git a/tests/e2e/util/tidbcluster/tidbcluster.go b/tests/e2e/util/tidbcluster/tidbcluster.go new file mode 100644 index 0000000000..cb4c109878 --- /dev/null +++ b/tests/e2e/util/tidbcluster/tidbcluster.go @@ -0,0 +1,71 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tidbcluster + +import ( + "time" + + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" + "github.com/pingcap/tidb-operator/pkg/util/tidbcluster" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + testutils "k8s.io/kubernetes/test/utils" +) + +var ( + pollInterval = time.Second * 10 +) + +type TidbClusterCondition func(tc *v1alpha1.TidbCluster) (bool, error) + +// WaitForTidbClusterCondition waits for a TidbCluster to be matched to the given condition. 
+func WaitForTidbClusterCondition(c versioned.Interface, ns, name string, timeout time.Duration, condition TidbClusterCondition) error {
+	return wait.PollImmediate(pollInterval, timeout, func() (bool, error) {
+		tc, err := c.PingcapV1alpha1().TidbClusters(ns).Get(name, metav1.GetOptions{})
+		if err != nil {
+			if testutils.IsRetryableAPIError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		return condition(tc)
+	})
+}
+
+// IsTidbClusterReady returns true if a TidbCluster is ready; false otherwise.
+func IsTidbClusterReady(tc *v1alpha1.TidbCluster) bool {
+	condition := tidbcluster.GetTidbClusterReadyCondition(tc.Status)
+	return condition != nil && condition.Status == v1.ConditionTrue
+}
+
+// IsTidbClusterAvailable returns true if a TidbCluster has been ready for at least minReadyDuration; false otherwise.
+func IsTidbClusterAvailable(tc *v1alpha1.TidbCluster, minReadyDuration time.Duration, now time.Time) bool {
+	if !IsTidbClusterReady(tc) {
+		return false
+	}
+	c := tidbcluster.GetTidbClusterReadyCondition(tc.Status)
+	if minReadyDuration <= 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadyDuration).Before(now) {
+		return true
+	}
+	return false
+}
+
+// WaitForTidbClusterReady waits for a TidbCluster to be ready for at least minReadyDuration.
+func WaitForTidbClusterReady(c versioned.Interface, ns, name string, timeout time.Duration, minReadyDuration time.Duration) error {
+	return WaitForTidbClusterCondition(c, ns, name, timeout, func(tc *v1alpha1.TidbCluster) (bool, error) {
+		return IsTidbClusterAvailable(tc, minReadyDuration, time.Now()), nil
+	})
+}