Directly use CRD in e2e test instead of helm (#2545) (#2608)
sre-bot authored Jun 2, 2020
1 parent 445908c commit f50e93d
Showing 3 changed files with 197 additions and 63 deletions.
124 changes: 124 additions & 0 deletions tests/crd_test_util.go
@@ -0,0 +1,124 @@
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package tests

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
	"github.com/pingcap/tidb-operator/pkg/label"
	"github.com/pingcap/tidb-operator/tests/slack"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func GetTidbClusterOrDie(cli versioned.Interface, name, namespace string) *v1alpha1.TidbCluster {
	tc, err := cli.PingcapV1alpha1().TidbClusters(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		slack.NotifyAndPanic(err)
	}
	return tc
}

func CreateTidbClusterOrDie(cli versioned.Interface, tc *v1alpha1.TidbCluster) {
	_, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Create(tc)
	if err != nil {
		slack.NotifyAndPanic(err)
	}
}

func UpdateTidbClusterOrDie(cli versioned.Interface, tc *v1alpha1.TidbCluster) {
	err := wait.Poll(5*time.Second, 3*time.Minute, func() (done bool, err error) {
		latestTC, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		latestTC.Spec = tc.Spec
		_, err = cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Update(latestTC)
		if err != nil {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		slack.NotifyAndPanic(err)
	}
}

func CheckDisasterToleranceOrDie(kubeCli kubernetes.Interface, tc *v1alpha1.TidbCluster) {
	err := checkDisasterTolerance(kubeCli, tc)
	if err != nil {
		slack.NotifyAndPanic(err)
	}
}

func checkDisasterTolerance(kubeCli kubernetes.Interface, cluster *v1alpha1.TidbCluster) error {
	pds, err := kubeCli.CoreV1().Pods(cluster.Namespace).List(
		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
			label.New().Instance(cluster.Name).PD().Labels(),
		).String()})
	if err != nil {
		return err
	}
	err = checkPodsDisasterTolerance(pds.Items)
	if err != nil {
		return err
	}

	tikvs, err := kubeCli.CoreV1().Pods(cluster.Namespace).List(
		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
			label.New().Instance(cluster.Name).TiKV().Labels(),
		).String()})
	if err != nil {
		return err
	}
	err = checkPodsDisasterTolerance(tikvs.Items)
	if err != nil {
		return err
	}

	tidbs, err := kubeCli.CoreV1().Pods(cluster.Namespace).List(
		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
			label.New().Instance(cluster.Name).TiDB().Labels(),
		).String()})
	if err != nil {
		return err
	}
	return checkPodsDisasterTolerance(tidbs.Items)
}

func checkPodsDisasterTolerance(allPods []corev1.Pod) error {
	for _, pod := range allPods {
		if pod.Spec.Affinity == nil {
			return fmt.Errorf("pod [%s/%s] has no Affinity", pod.Namespace, pod.Name)
		}
		if pod.Spec.Affinity.PodAntiAffinity == nil {
			return fmt.Errorf("pod [%s/%s] has no Affinity.PodAntiAffinity", pod.Namespace, pod.Name)
		}
		if len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 {
			return fmt.Errorf("pod [%s/%s] has no PreferredDuringSchedulingIgnoredDuringExecution terms", pod.Namespace, pod.Name)
		}
		for _, prefer := range pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
			if prefer.PodAffinityTerm.TopologyKey != RackLabel {
				return fmt.Errorf("pod [%s/%s] anti-affinity topology key is not %s", pod.Namespace, pod.Name, RackLabel)
			}
		}
	}
	return nil
}
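
The following is a minimal sketch (hypothetical, not part of this commit) of how the helpers above compose to replace the old helm-based flow: build the TidbCluster object from the shared fixture, create it through the typed clientset, wait for readiness, then scale by mutating the spec and updating the CR. The package name, deployAndScaleViaCRD, waitReady, and the namespace/name/version arguments are illustrative stand-ins; waitReady plays the role of oa.WaitForTidbClusterReady in the e2e harness.

package example

import (
	"time"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
	"github.com/pingcap/tidb-operator/tests"
	"github.com/pingcap/tidb-operator/tests/pkg/fixture"
	"k8s.io/client-go/kubernetes"
)

func deployAndScaleViaCRD(cli versioned.Interface, kubeCli kubernetes.Interface,
	waitReady func(tc *v1alpha1.TidbCluster, timeout, pollInterval time.Duration) error) error {
	// Build the CR from the shared fixture instead of rendering a helm chart.
	tc := fixture.GetTidbCluster("e2e-ns", "basic", "v3.0.8")
	tests.CreateTidbClusterOrDie(cli, tc)
	if err := waitReady(tc, 30*time.Minute, 15*time.Second); err != nil {
		return err
	}
	tests.CheckDisasterToleranceOrDie(kubeCli, tc)

	// Scale by updating the spec through the API server; the operator reconciles the rest.
	latest := tests.GetTidbClusterOrDie(cli, tc.Name, tc.Namespace)
	latest.Spec.PD.Replicas = 5
	latest.Spec.TiKV.Replicas = 5
	latest.Spec.TiDB.Replicas = 3
	tests.UpdateTidbClusterOrDie(cli, latest)
	return waitReady(latest, 30*time.Minute, 15*time.Second)
}
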
101 changes: 39 additions & 62 deletions tests/e2e/tidbcluster/tidbcluster.go
@@ -146,38 +146,43 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
for _, clusterCfg := range clusterCfgs {
localCfg := clusterCfg
ginkgo.It(fmt.Sprintf("[TiDB Version: %s] %s", localCfg.Version, localCfg.Name), func() {
cluster := newTidbClusterConfig(e2econfig.TestConfig, ns, localCfg.Name, "", localCfg.Version)
if len(localCfg.Values) > 0 {
for k, v := range localCfg.Values {
cluster.Resources[k] = v
}
}

cluster := newTidbCluster(ns, localCfg.Name, localCfg.Version)
cluster.Spec.EnablePVReclaim = pointer.BoolPtr(true)
// support reclaim pv when scale in tikv or pd component
cluster.EnablePVReclaim = true
oa.DeployTidbClusterOrDie(&cluster)
oa.CheckTidbClusterStatusOrDie(&cluster)
oa.CheckDisasterToleranceOrDie(&cluster)
oa.CheckInitSQLOrDie(&cluster)

tests.CreateTidbClusterOrDie(cli, cluster)
err := oa.WaitForTidbClusterReady(cluster, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err)
tests.CheckDisasterToleranceOrDie(c, cluster)

// scale
cluster.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
oa.ScaleTidbClusterOrDie(&cluster)
oa.CheckTidbClusterStatusOrDie(&cluster)
oa.CheckDisasterToleranceOrDie(&cluster)
tc := tests.GetTidbClusterOrDie(cli, cluster.Name, cluster.Namespace)
tc.Spec.TiDB.Replicas = 3
tc.Spec.TiKV.Replicas = 5
tc.Spec.PD.Replicas = 5
tests.UpdateTidbClusterOrDie(cli, tc)
err = oa.WaitForTidbClusterReady(cluster, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err)
tests.CheckDisasterToleranceOrDie(c, cluster)

cluster.ScaleTiDB(2).ScaleTiKV(4).ScalePD(3)
oa.ScaleTidbClusterOrDie(&cluster)
oa.CheckTidbClusterStatusOrDie(&cluster)
oa.CheckDisasterToleranceOrDie(&cluster)
tc = tests.GetTidbClusterOrDie(cli, cluster.Name, cluster.Namespace)
tc.Spec.TiDB.Replicas = 2
tc.Spec.TiKV.Replicas = 4
tc.Spec.PD.Replicas = 3
tests.UpdateTidbClusterOrDie(cli, tc)
err = oa.WaitForTidbClusterReady(cluster, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err)
tests.CheckDisasterToleranceOrDie(c, cluster)

// configuration change
cluster.EnableConfigMapRollout = true
cluster.UpdatePdMaxReplicas(cfg.PDMaxReplicas).
UpdateTiKVGrpcConcurrency(cfg.TiKVGrpcConcurrency).
UpdateTiDBTokenLimit(cfg.TiDBTokenLimit)
oa.UpgradeTidbClusterOrDie(&cluster)
oa.CheckTidbClusterStatusOrDie(&cluster)
tc = tests.GetTidbClusterOrDie(cli, cluster.Name, cluster.Namespace)
tc.Spec.ConfigUpdateStrategy = v1alpha1.ConfigUpdateStrategyRollingUpdate
tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(4)
tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(4)
tc.Spec.TiDB.MaxFailoverCount = pointer.Int32Ptr(4)
tests.UpdateTidbClusterOrDie(cli, tc)
err = oa.WaitForTidbClusterReady(cluster, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err)
})
}
})
@@ -693,42 +698,6 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
framework.ExpectNoError(err, "Expected tidbcluster pod restarted")
})

ginkgo.It("should be operable without helm [API]", func() {
tc := fixture.GetTidbCluster(ns, "plain-cr", utilimage.TiDBV3Version)
err := genericCli.Create(context.TODO(), tc)
framework.ExpectNoError(err, "Expected TiDB cluster created")
err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "Expected TiDB cluster ready")

err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.PD.Replicas = 5
tc.Spec.TiKV.Replicas = 5
tc.Spec.TiDB.Replicas = 4
return nil
})
framework.ExpectNoError(err, "Expected TiDB cluster updated")
err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "Expected TiDB cluster scaled out and ready")

err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.Version = utilimage.TiDBV3UpgradeVersion
return nil
})
framework.ExpectNoError(err, "Expected TiDB cluster updated")
err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "Expected TiDB cluster upgraded to new version and ready")

err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 3
tc.Spec.TiDB.Replicas = 2
return nil
})
framework.ExpectNoError(err, "Expected TiDB cluster updated")
err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "Expected TiDB cluster scaled in and ready")
})

ginkgo.It("TidbMonitor: Deploying and checking monitor", func() {
cluster := newTidbClusterConfig(e2econfig.TestConfig, ns, "monitor-test", "admin", utilimage.TiDBV3Version)
cluster.Resources["pd.replicas"] = "1"
@@ -1271,6 +1240,14 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
})
})

func newTidbCluster(ns, clusterName, tidbVersion string) *v1alpha1.TidbCluster {
tc := fixture.GetTidbCluster(ns, clusterName, tidbVersion)
tc.Spec.EnablePVReclaim = pointer.BoolPtr(false)
tc.Spec.PD.StorageClassName = pointer.StringPtr("local-storage")
tc.Spec.TiKV.StorageClassName = pointer.StringPtr("local-storage")
return tc
}

func newTidbClusterConfig(cfg *tests.Config, ns, clusterName, password, tidbVersion string) tests.TidbClusterConfig {
return tests.TidbClusterConfig{
Namespace: ns,
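
The removed "should be operable without helm [API]" test above also exercised a rolling version upgrade through controller.GuaranteedUpdate. A minimal sketch (hypothetical, assuming the same example package and imports as the sketch after crd_test_util.go) of that step expressed with the new helpers:

// upgradeViaCRD bumps spec.version and lets the operator roll the cluster.
// targetVersion is a placeholder; the removed test used utilimage.TiDBV3UpgradeVersion.
func upgradeViaCRD(cli versioned.Interface, name, namespace, targetVersion string,
	waitReady func(tc *v1alpha1.TidbCluster, timeout, pollInterval time.Duration) error) error {
	tc := tests.GetTidbClusterOrDie(cli, name, namespace)
	tc.Spec.Version = targetVersion
	tests.UpdateTidbClusterOrDie(cli, tc)
	return waitReady(tc, 30*time.Minute, 15*time.Second)
}
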
35 changes: 34 additions & 1 deletion tests/pkg/fixture/fixture.go
@@ -62,7 +62,6 @@ func WithStorage(r corev1.ResourceRequirements, size string) corev1.ResourceRequ
r.Requests = corev1.ResourceList{}
}
r.Requests[corev1.ResourceStorage] = resource.MustParse(size)

return r
}

@@ -84,6 +83,7 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
if v, err := semver.NewVersion(version); err == nil && v.LessThan(tikvV4Beta) {
tikvStorageConfig = nil
}

return &v1alpha1.TidbCluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -109,6 +109,9 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
MaxStoreDownTime: pointer.StringPtr("5m"),
},
},
ComponentSpec: v1alpha1.ComponentSpec{
Affinity: buildAffinity(name, ns, v1alpha1.PDMemberType),
},
},

TiKV: v1alpha1.TiKVSpec{
@@ -121,6 +124,9 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
Server: &v1alpha1.TiKVServerConfig{},
Storage: tikvStorageConfig,
},
ComponentSpec: v1alpha1.ComponentSpec{
Affinity: buildAffinity(name, ns, v1alpha1.TiKVMemberType),
},
},

TiDB: v1alpha1.TiDBSpec{
@@ -140,6 +146,33 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
Level: pointer.StringPtr("info"),
},
},
ComponentSpec: v1alpha1.ComponentSpec{
Affinity: buildAffinity(name, ns, v1alpha1.TiDBMemberType),
},
},
},
}
}

func buildAffinity(name, namespace string, memberType v1alpha1.MemberType) *corev1.Affinity {
return &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
{
Weight: int32(50),
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/component": memberType.String(),
"app.kubernetes.io/instance": name,
},
},
Namespaces: []string{
namespace,
},
TopologyKey: "rack",
},
},
},
},
}
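
A minimal unit-test sketch (hypothetical, not part of this commit) that would sit next to fixture.go in the same package and pin down the contract the e2e check in crd_test_util.go relies on: the preferred anti-affinity term built here must use the "rack" topology key that checkPodsDisasterTolerance compares against (via RackLabel), and must select pods of the same component and instance.

package fixture

import (
	"testing"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func TestBuildAffinityUsesRackTopologyKey(t *testing.T) {
	aff := buildAffinity("basic", "e2e-ns", v1alpha1.PDMemberType)
	terms := aff.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	if len(terms) != 1 {
		t.Fatalf("expected exactly one preferred anti-affinity term, got %d", len(terms))
	}
	term := terms[0].PodAffinityTerm
	if term.TopologyKey != "rack" {
		t.Fatalf("expected topology key %q, got %q", "rack", term.TopologyKey)
	}
	if got := term.LabelSelector.MatchLabels["app.kubernetes.io/component"]; got != "pd" {
		t.Fatalf("expected component label %q, got %q", "pd", got)
	}
}
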
