Remove storeLabels (#663)
* remove storeLabels
xiaojingchen authored Jul 18, 2019
1 parent 9252057 commit 825705b
Showing 8 changed files with 40 additions and 44 deletions.
10 changes: 0 additions & 10 deletions charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -67,16 +67,6 @@ spec:
annotations:
{{ toYaml .Values.tikv.annotations | indent 6 }}
{{- end }}
{{- if .Values.tikv.storeLabels }}
storeLabels:
{{ toYaml .Values.tikv.storeLabels | indent 4 }}
{{- else }}
storeLabels:
- region
- zone
- rack
- host
{{- end }}
tidb:
replicas: {{ .Values.tidb.replicas }}
image: {{ .Values.tidb.image }}
11 changes: 0 additions & 11 deletions charts/tidb-cluster/values.yaml
@@ -241,17 +241,6 @@ tikv:
# effect: "NoSchedule"
annotations: {}

## storeLabels is used to define store label keys
## The label keys specified the location of a store.
## In order to use the location awareness feature of TiKV, users have to label their k8s nodes with the same labels.
## Note: current can not support these labels contains "/"
## The placement priorities is implied by the order of label keys.
## For example, ["zone", "rack"] means that we should place replicas to
## different zones first, then to different racks if we don't have enough zones.
## default value is ["region", "zone", "rack", "host"]
## If you change the default value below, please do sync the change to pd.config.[replication].location-labels
## storeLabels: ["region", "zone", "rack", "host"]

tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
# tidb configurations(change to the tags of your tidb version),
1 change: 0 additions & 1 deletion pkg/apis/pingcap.com/v1alpha1/types.go
@@ -144,7 +144,6 @@ type TiKVSpec struct {
StorageClassName string `json:"storageClassName,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
StoreLabels []string `json:"storeLabels,omitempty"`
}

// TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
5 changes: 0 additions & 5 deletions pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

12 changes: 11 additions & 1 deletion pkg/manager/member/tikv_member_manager.go
@@ -524,6 +524,16 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
return setCount, err
}

config, err := pdCli.GetConfig()
if err != nil {
return setCount, err
}

locationLabels := []string(config.Replication.LocationLabels)
if locationLabels == nil {
return setCount, nil
}

for _, store := range storesInfo.Stores {
status := tkmm.getTiKVStore(store)
if status == nil {
@@ -537,7 +547,7 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
}

nodeName := pod.Spec.NodeName
ls, err := tkmm.getNodeLabels(nodeName, tc.Spec.TiKV.StoreLabels)
ls, err := tkmm.getNodeLabels(nodeName, locationLabels)
if err != nil || len(ls) == 0 {
glog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
continue
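After this change, setStoreLabelsForTiKV derives the label keys from PD's [replication] location-labels (via pdCli.GetConfig()) rather than from tc.Spec.TiKV.StoreLabels, so the cluster spec no longer has to mirror PD's replication config. A minimal, self-contained sketch of the filtering step that follows (filterNodeLabels is an illustrative helper, not the operator's actual getNodeLabels, which reads labels from the Kubernetes Node object):

```go
package main

import "fmt"

// filterNodeLabels keeps only the node labels whose keys appear in PD's
// location-labels, mirroring how setStoreLabelsForTiKV now scopes the labels
// it pushes to each TiKV store.
func filterNodeLabels(locationLabels []string, nodeLabels map[string]string) map[string]string {
	ls := map[string]string{}
	for _, key := range locationLabels {
		if v, ok := nodeLabels[key]; ok {
			ls[key] = v
		}
	}
	return ls
}

func main() {
	// locationLabels would come from pdCli.GetConfig().Replication.LocationLabels.
	locationLabels := []string{"region", "zone", "rack", "host"}
	nodeLabels := map[string]string{
		"zone":                   "us-west-1a",
		"host":                   "node-3",
		"kubernetes.io/hostname": "node-3", // ignored: not in location-labels
	}
	fmt.Println(filterNodeLabels(locationLabels, nodeLabels))
	// map[host:node-3 zone:us-west-1a]
}
```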
26 changes: 23 additions & 3 deletions pkg/manager/member/tikv_member_manager_test.go
@@ -21,6 +21,8 @@ import (

. "github.com/onsi/gomega"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/pd/pkg/typeutil"
"github.com/pingcap/pd/server"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
@@ -72,7 +74,13 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) {
}

tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc)

pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
return &server.Config{
Replication: server.ReplicationConfig{
LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
},
}, nil
})
if test.errWhenGetStores {
pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
return nil, fmt.Errorf("failed to get stores from tikv cluster")
@@ -221,6 +229,13 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) {
tcName := tc.Name

tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc)
pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
return &server.Config{
Replication: server.ReplicationConfig{
LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
},
}, nil
})
if test.errWhenGetStores {
pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
return nil, fmt.Errorf("failed to get stores from pd cluster")
@@ -489,9 +504,14 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) {
}
testFn := func(test *testcase, t *testing.T) {
tc := newTidbClusterForPD()
tc.Spec.TiKV.StoreLabels = []string{"region", "zone", "rack"}
pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiKVMemberManager(tc)

pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
return &server.Config{
Replication: server.ReplicationConfig{
LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
},
}, nil
})
if test.errWhenGetStores {
pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
return nil, fmt.Errorf("failed to get stores")
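All three tests now stub PD's GetConfig so the member manager can resolve location labels. A hedged sketch of a shared helper that registers that reaction (the FakePDClient type name is an assumption about what newFakeTiKVMemberManager returns; the AddReaction call and the pd server config types are exactly those used in this diff):

```go
package member

import (
	"github.com/pingcap/pd/pkg/typeutil"
	"github.com/pingcap/pd/server"
	"github.com/pingcap/tidb-operator/pkg/pdapi"
)

// stubLocationLabels registers a GetConfig reaction on the fake PD client so
// setStoreLabelsForTiKV sees the given location labels during tests.
// (pdapi.FakePDClient is assumed here to be the concrete fake client type.)
func stubLocationLabels(pdClient *pdapi.FakePDClient, labels ...string) {
	pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
		return &server.Config{
			Replication: server.ReplicationConfig{
				LocationLabels: typeutil.StringSlice(labels),
			},
		}, nil
	})
}
```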
4 changes: 2 additions & 2 deletions tests/cluster_info.go
@@ -100,7 +100,7 @@ func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) {
fmt.Sprintf(`level = "%s"`, pdLogLevel),
"[replication]",
fmt.Sprintf("max-replicas = %d", pdMaxReplicas),
`location-labels = ["region", "zone", "rack", "host"]`,
fmt.Sprintf(`location-labels = ["%s"]`, tc.TopologyKey),
}
tikvConfig := []string{
"[log]",
@@ -113,7 +113,7 @@ func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) {
"[log]",
`level = "info"`,
}
subValues := GetAffinityConfigOrDie(tc.ClusterName, tc.Namespace, tc.TopologyKey, []string{tc.TopologyKey}, pdConfig, tikvConfig, tidbConfig)
subValues := GetSubValuesOrDie(tc.ClusterName, tc.Namespace, tc.TopologyKey, pdConfig, tikvConfig, tidbConfig)
subVaulesPath := fmt.Sprintf("%s/%s.yaml", path, tc.ClusterName)
_, err := os.Stat(subVaulesPath)
if err != nil {
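The e2e sub-values now pin PD's location-labels to the single topology key used for affinity, instead of the fixed ["region", "zone", "rack", "host"] list. A tiny illustration of what that Sprintf produces (the "rack" value is only an example):

```go
package main

import "fmt"

func main() {
	topologyKey := "rack" // stands in for tc.TopologyKey
	line := fmt.Sprintf(`location-labels = ["%s"]`, topologyKey)
	fmt.Println(line) // location-labels = ["rack"]
}
```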
15 changes: 4 additions & 11 deletions tests/util.go
@@ -89,12 +89,6 @@ func GetPodsByLabels(kubeCli kubernetes.Interface, node string, lables map[strin
}

var affinityTemp string = `{{.Kind}}:
{{ $length := len .StoreLabels}} {{ if or (not .StoreLabels) (eq $length 0)}}
{{else if eq .Kind "tikv"}}
storeLabels:
{{range .StoreLabels}} - {{.}}
{{end}}
{{end}}
config: |
{{range .Config}} {{.}}
{{end}}
@@ -118,28 +112,27 @@ type AffinityInfo struct {
Weight int
Namespace string
TopologyKey string
StoreLabels []string
Config []string
}

func GetAffinityConfigOrDie(clusterName, namespace, topologyKey string, storeLabels []string, pdConfig []string, tikvConfig []string, tidbConfig []string) string {
func GetSubValuesOrDie(clusterName, namespace, topologyKey string, pdConfig []string, tikvConfig []string, tidbConfig []string) string {
temp, err := template.New("dt-affinity").Parse(affinityTemp)
if err != nil {
slack.NotifyAndPanic(err)
}

pdbuff := new(bytes.Buffer)
err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: pdConfig})
err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: pdConfig})
if err != nil {
slack.NotifyAndPanic(err)
}
tikvbuff := new(bytes.Buffer)
err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: tikvConfig})
err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: tikvConfig})
if err != nil {
slack.NotifyAndPanic(err)
}
tidbbuff := new(bytes.Buffer)
err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: tidbConfig})
err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: tidbConfig})
if err != nil {
slack.NotifyAndPanic(err)
}
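With the storeLabels branch removed, the sub-values template only emits each component's config block. A trimmed-down stand-in for affinityTemp and GetSubValuesOrDie showing that rendering (the template text and struct below are simplified illustrations, not the test helper's real definitions):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// A simplified version of the per-component section rendered by GetSubValuesOrDie
// after this change: just the kind and its config lines.
const subValuesTemp = `{{.Kind}}:
  config: |
{{range .Config}}    {{.}}
{{end}}`

type componentInfo struct {
	Kind   string
	Config []string
}

func main() {
	temp := template.Must(template.New("sub-values").Parse(subValuesTemp))
	var buf bytes.Buffer
	// Values are illustrative; the real helper also renders affinity settings.
	err := temp.Execute(&buf, &componentInfo{
		Kind:   "pd",
		Config: []string{"[replication]", `location-labels = ["rack"]`},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// pd:
	//   config: |
	//     [replication]
	//     location-labels = ["rack"]
}
```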
