diff --git a/.gitignore b/.gitignore
index 81e2de79eb9..e100d144118 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,5 @@ tags
 /.retools/
 vendor
 default*
+*.bak
+.vscode/
diff --git a/client/client.go b/client/client.go
index 665ae346639..2a1064e17fa 100644
--- a/client/client.go
+++ b/client/client.go
@@ -15,10 +15,6 @@ package pd

 import (
 	"context"
-	"crypto/tls"
-	"crypto/x509"
-	"io/ioutil"
-	"net/url"
 	"strings"
 	"sync"
 	"time"
@@ -26,11 +22,11 @@ import (
 	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
-	log "github.com/pingcap/log"
+	"github.com/pingcap/log"
+	"github.com/pingcap/pd/pkg/grpcutil"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
 )

 // Client is a PD (Placement Driver) client.
@@ -272,43 +268,7 @@ func (c *client) getOrCreateGRPCConn(addr string) (*grpc.ClientConn, error) {
 		return conn, nil
 	}

-	opt := grpc.WithInsecure()
-	if len(c.security.CAPath) != 0 {
-
-		certificates := []tls.Certificate{}
-		if len(c.security.CertPath) != 0 && len(c.security.KeyPath) != 0 {
-			// Load the client certificates from disk
-			certificate, err := tls.LoadX509KeyPair(c.security.CertPath, c.security.KeyPath)
-			if err != nil {
-				return nil, errors.Errorf("could not load client key pair: %s", err)
-			}
-			certificates = append(certificates, certificate)
-		}
-
-		// Create a certificate pool from the certificate authority
-		certPool := x509.NewCertPool()
-		ca, err := ioutil.ReadFile(c.security.CAPath)
-		if err != nil {
-			return nil, errors.Errorf("could not read ca certificate: %s", err)
-		}
-
-		// Append the certificates from the CA
-		if !certPool.AppendCertsFromPEM(ca) {
-			return nil, errors.New("failed to append ca certs")
-		}
-
-		creds := credentials.NewTLS(&tls.Config{
-			Certificates: certificates,
-			RootCAs:      certPool,
-		})
-
-		opt = grpc.WithTransportCredentials(creds)
-	}
-	u, err := url.Parse(addr)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	cc, err := grpc.Dial(u.Host, opt)
+	cc, err := grpcutil.GetClientConn(addr, c.security.CAPath, c.security.CertPath, c.security.KeyPath)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
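A quick way to exercise the new helper from the caller's side: a minimal sketch, assuming a local PD on the default client port. The certificate paths are placeholders; an empty CA path falls back to an insecure dial exactly as before.

package main

import (
	"context"
	"log"
	"time"

	"github.com/pingcap/kvproto/pkg/pdpb"
	"github.com/pingcap/pd/pkg/grpcutil"
)

func main() {
	// GetClientConn parses the address with url.Parse and dials u.Host,
	// so a scheme is required. The three paths below are hypothetical;
	// when present they enable mutual TLS, otherwise the dial is insecure.
	cc, err := grpcutil.GetClientConn("http://127.0.0.1:2379",
		"/path/to/ca.pem", "/path/to/client.pem", "/path/to/client-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := pdpb.NewPDClient(cc).GetMembers(ctx, &pdpb.GetMembersRequest{}); err != nil {
		log.Fatal(err)
	}
}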

diff --git a/docs/api.html b/docs/api.html
index 99ae3895d25..c874d70ce1e 100644
--- a/docs/api.html
+++ b/docs/api.html
[docs/api.html is the generated, minified single-page API reference. The old and new versions carry the same endpoint summaries (/cluster/status, /version, /status, /diagnose, /members, /leader, /health, /config, /stores, /store/{storeId}, /labels, /region, /regions, /schedulers, /operators, /hotspot, /stats, /trend, /admin, /classifier); the page is regenerated alongside the api.raml change below and its embedded stylesheet is rebuilt. The minified HTML is omitted here.]
diff --git a/pkg/grpcutil/grpcutil.go b/pkg/grpcutil/grpcutil.go
new file mode 100644
index 00000000000..af120b0f22f
--- /dev/null
+++ b/pkg/grpcutil/grpcutil.go
@@ -0,0 +1,69 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"net/url"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+// GetClientConn returns a gRPC client connection.
+func GetClientConn(addr string, caPath string, certPath string, keyPath string) (*grpc.ClientConn, error) {
+	opt := grpc.WithInsecure()
+	if len(caPath) != 0 {
+		certificates := []tls.Certificate{}
+		if len(certPath) != 0 && len(keyPath) != 0 {
+			// Load the client certificates from disk
+			certificate, err := tls.LoadX509KeyPair(certPath, keyPath)
+			if err != nil {
+				return nil, errors.Errorf("could not load client key pair: %s", err)
+			}
+			certificates = append(certificates, certificate)
+		}
+
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, errors.Errorf("could not read ca certificate: %s", err)
+		}
+
+		// Append the certificates from the CA
+		if !certPool.AppendCertsFromPEM(ca) {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		creds := credentials.NewTLS(&tls.Config{
+			Certificates: certificates,
+			RootCAs:      certPool,
+		})
+
+		opt = grpc.WithTransportCredentials(creds)
+	}
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	cc, err := grpc.Dial(u.Host, opt)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return cc, nil
+}
diff --git a/server/api/api.raml b/server/api/api.raml
index 601b996826a..5c7a8ac21d2 100644
--- a/server/api/api.raml
+++ b/server/api/api.raml
@@ -924,7 +924,7 @@ types:
     uriParameters:
       filter:
         type: string
-        enum: [ miss-peer, extra-peer, pending-peer, down-peer, incorrect-ns ]
+        enum: [ miss-peer, extra-peer, pending-peer, down-peer, incorrect-ns, offline-peer, empty-region ]
   get:
     description: List regions with unhealthy status.
     responses:
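Since api.html is generated from api.raml, the enum change above is the documentation counterpart of the two region checks added below in region.go and router.go. A hedged smoke test against a running PD; the address and the /pd URL prefix are assumptions about the deployment:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	for _, check := range []string{"offline-peer", "empty-region"} {
		resp, err := http.Get("http://127.0.0.1:2379/pd/api/v1/regions/check/" + check)
		if err != nil {
			log.Fatal(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %s\n", check, body)
	}
}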
diff --git a/server/api/redirector.go b/server/api/redirector.go
index 911f48e58bc..7cf61245cec 100644
--- a/server/api/redirector.go
+++ b/server/api/redirector.go
@@ -14,10 +14,12 @@
 package api

 import (
+	"crypto/tls"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
+	"sync"

 	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server"
@@ -33,6 +35,8 @@ const (
 	errRedirectToNotLeader = "redirect to not leader"
 )

+var initHTTPClientOnce sync.Once
+
 type redirector struct {
 	s *server.Server
 }
@@ -67,7 +71,20 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-
+	initHTTPClientOnce.Do(func() {
+		var tlsConfig *tls.Config
+		tlsConfig, err = server.ToTLSConfig(h.s.GetSecurityConfig())
+		dialClient = &http.Client{
+			Transport: &http.Transport{
+				DisableKeepAlives: true,
+				TLSClientConfig:   tlsConfig,
+			},
+		}
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
 	newCustomReverseProxies(urls).ServeHTTP(w, r)
 }
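The redirector builds its proxy client lazily because the security config is only reachable through the server handle, not at package init. Note that an error from server.ToTLSConfig is only observable on the very first request, since sync.Once never re-runs the closure. A standalone sketch of the pattern; buildTLSConfig stands in for server.ToTLSConfig(h.s.GetSecurityConfig()):

package main

import (
	"crypto/tls"
	"net/http"
	"sync"
)

var (
	initOnce   sync.Once
	dialClient *http.Client
)

// lazyClient mirrors the once-guarded construction above. If the TLS
// config fails to build, only the first caller sees err; later callers
// get the client with a nil TLS config.
func lazyClient(buildTLSConfig func() (*tls.Config, error)) (*http.Client, error) {
	var err error
	initOnce.Do(func() {
		var cfg *tls.Config
		cfg, err = buildTLSConfig()
		dialClient = &http.Client{
			Transport: &http.Transport{
				DisableKeepAlives: true,
				TLSClientConfig:   cfg,
			},
		}
	})
	return dialClient, err
}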
router.HandleFunc("/api/v1/regions/check/pending-peer", regionsHandler.GetPendingPeerRegions).Methods("GET") router.HandleFunc("/api/v1/regions/check/down-peer", regionsHandler.GetDownPeerRegions).Methods("GET") + router.HandleFunc("/api/v1/regions/check/offline-peer", regionsHandler.GetOfflinePeer).Methods("GET") + router.HandleFunc("/api/v1/regions/check/empty-region", regionsHandler.GetEmptyRegion).Methods("GET") router.HandleFunc("/api/v1/regions/sibling/{id}", regionsHandler.GetRegionSiblings).Methods("GET") router.HandleFunc("/api/v1/regions/check/incorrect-ns", regionsHandler.GetIncorrectNamespaceRegions).Methods("GET") diff --git a/server/config.go b/server/config.go index 2ab75ebf435..6884ad30fc2 100644 --- a/server/config.go +++ b/server/config.go @@ -776,15 +776,23 @@ type SecurityConfig struct { KeyPath string `toml:"key-path" json:"key-path"` } +// ConvertToMap is used to convert SecurityConfig to a map. +func (s *SecurityConfig) ConvertToMap() map[string]string { + return map[string]string{ + "caPath": s.CAPath, + "certPath": s.CertPath, + "keyPath": s.KeyPath} +} + // ToTLSConfig generatres tls config. -func (s SecurityConfig) ToTLSConfig() (*tls.Config, error) { - if len(s.CertPath) == 0 && len(s.KeyPath) == 0 { +func ToTLSConfig(config map[string]string) (*tls.Config, error) { + if len(config["certPath"]) == 0 && len(config["keyPath"]) == 0 { return nil, nil } tlsInfo := transport.TLSInfo{ - CertFile: s.CertPath, - KeyFile: s.KeyPath, - TrustedCAFile: s.CAPath, + CertFile: config["certPath"], + KeyFile: config["keyPath"], + TrustedCAFile: config["caPath"], } tlsConfig, err := tlsInfo.ClientConfig() if err != nil { diff --git a/server/config_test.go b/server/config_test.go index ea09c4c4ea4..2c98851cbb7 100644 --- a/server/config_test.go +++ b/server/config_test.go @@ -32,7 +32,7 @@ type testConfigSuite struct{} func (s *testConfigSuite) TestTLS(c *C) { cfg := NewConfig() - tls, err := cfg.Security.ToTLSConfig() + tls, err := ToTLSConfig(cfg.Security.ConvertToMap()) c.Assert(err, IsNil) c.Assert(tls, IsNil) } diff --git a/server/core/store_option.go b/server/core/store_option.go index 75a412a81cc..135a3c9b9bb 100644 --- a/server/core/store_option.go +++ b/server/core/store_option.go @@ -16,6 +16,7 @@ package core import ( "time" + "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" ) @@ -26,28 +27,36 @@ type StoreCreateOption func(region *StoreInfo) // SetStoreAddress sets the address for the store. func SetStoreAddress(address string) StoreCreateOption { return func(store *StoreInfo) { - store.meta.Address = address + meta := proto.Clone(store.meta).(*metapb.Store) + meta.Address = address + store.meta = meta } } // SetStoreLabels sets the labels for the store. func SetStoreLabels(labels []*metapb.StoreLabel) StoreCreateOption { return func(store *StoreInfo) { - store.meta.Labels = labels + meta := proto.Clone(store.meta).(*metapb.Store) + meta.Labels = labels + store.meta = meta } } // SetStoreVersion sets the version for the store. func SetStoreVersion(version string) StoreCreateOption { return func(store *StoreInfo) { - store.meta.Version = version + meta := proto.Clone(store.meta).(*metapb.Store) + meta.Version = version + store.meta = meta } } // SetStoreState sets the state for the store. 
diff --git a/server/core/store_option.go b/server/core/store_option.go
index 75a412a81cc..135a3c9b9bb 100644
--- a/server/core/store_option.go
+++ b/server/core/store_option.go
@@ -16,6 +16,7 @@ package core
 import (
 	"time"

+	"github.com/gogo/protobuf/proto"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
 )
@@ -26,28 +27,36 @@ type StoreCreateOption func(region *StoreInfo)

 // SetStoreAddress sets the address for the store.
 func SetStoreAddress(address string) StoreCreateOption {
 	return func(store *StoreInfo) {
-		store.meta.Address = address
+		meta := proto.Clone(store.meta).(*metapb.Store)
+		meta.Address = address
+		store.meta = meta
 	}
 }

 // SetStoreLabels sets the labels for the store.
 func SetStoreLabels(labels []*metapb.StoreLabel) StoreCreateOption {
 	return func(store *StoreInfo) {
-		store.meta.Labels = labels
+		meta := proto.Clone(store.meta).(*metapb.Store)
+		meta.Labels = labels
+		store.meta = meta
 	}
 }

 // SetStoreVersion sets the version for the store.
 func SetStoreVersion(version string) StoreCreateOption {
 	return func(store *StoreInfo) {
-		store.meta.Version = version
+		meta := proto.Clone(store.meta).(*metapb.Store)
+		meta.Version = version
+		store.meta = meta
 	}
 }

 // SetStoreState sets the state for the store.
 func SetStoreState(state metapb.StoreState) StoreCreateOption {
 	return func(store *StoreInfo) {
-		store.meta.State = state
+		meta := proto.Clone(store.meta).(*metapb.Store)
+		meta.State = state
+		store.meta = meta
 	}
 }
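These options now clone the protobuf before mutating it, turning StoreInfo updates into copy-on-write: readers holding the old *metapb.Store never observe a half-applied change. The idea in isolation, as a runnable sketch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/pingcap/kvproto/pkg/metapb"
)

func main() {
	old := &metapb.Store{Id: 1, Address: "127.0.0.1:20160"}

	// Clone, then mutate the copy; `old` stays untouched for concurrent readers.
	meta := proto.Clone(old).(*metapb.Store)
	meta.Address = "127.0.0.1:20161"

	fmt.Println(old.GetAddress())  // 127.0.0.1:20160
	fmt.Println(meta.GetAddress()) // 127.0.0.1:20161
}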
diff --git a/server/handler.go b/server/handler.go
index 2b6e3808ba4..bdcbea5fba2 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -735,6 +735,24 @@ func (h *Handler) GetIncorrectNamespaceRegions() ([]*core.RegionInfo, error) {
 	return c.cachedCluster.GetRegionStatsByType(statistics.IncorrectNamespace), nil
 }

+// GetOfflinePeer gets the regions with offline peers.
+func (h *Handler) GetOfflinePeer() ([]*core.RegionInfo, error) {
+	c := h.s.GetRaftCluster()
+	if c == nil {
+		return nil, ErrNotBootstrapped
+	}
+	return c.cachedCluster.GetRegionStatsByType(statistics.OfflinePeer), nil
+}
+
+// GetEmptyRegion gets the regions with empty size.
+func (h *Handler) GetEmptyRegion() ([]*core.RegionInfo, error) {
+	c := h.s.GetRaftCluster()
+	if c == nil {
+		return nil, ErrNotBootstrapped
+	}
+	return c.cachedCluster.GetRegionStatsByType(statistics.EmptyRegion), nil
+}
+
 // ResetTS resets the ts with specified tso.
 func (h *Handler) ResetTS(ts uint64) error {
 	tsoServer := h.s
diff --git a/server/join.go b/server/join.go
index 744dd4af978..3c4cd999b28 100644
--- a/server/join.go
+++ b/server/join.go
@@ -108,7 +108,7 @@ func PrepareJoinCluster(cfg *Config) error {
 	}

 	// Below are cases without data directory.
-	tlsConfig, err := cfg.Security.ToTLSConfig()
+	tlsConfig, err := ToTLSConfig(cfg.Security.ConvertToMap())
 	if err != nil {
 		return err
 	}
diff --git a/server/region_syncer/client.go b/server/region_syncer/client.go
index 3a794c16a95..e5581bf8fbc 100644
--- a/server/region_syncer/client.go
+++ b/server/region_syncer/client.go
@@ -15,14 +15,14 @@ package syncer

 import (
 	"context"
-	"net/url"
 	"time"

 	"github.com/pingcap/kvproto/pkg/pdpb"
-	log "github.com/pingcap/log"
+	"github.com/pingcap/log"
+	"github.com/pingcap/pd/pkg/grpcutil"
 	"github.com/pingcap/pd/server/core"
+	"github.com/pkg/errors"
 	"go.uber.org/zap"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -51,14 +51,9 @@ func (s *RegionSyncer) reset() {

 func (s *RegionSyncer) establish(addr string) (ClientStream, error) {
 	s.reset()
-	u, err := url.Parse(addr)
+	cc, err := grpcutil.GetClientConn(addr, s.securityConfig["caPath"], s.securityConfig["certPath"], s.securityConfig["keyPath"])
 	if err != nil {
-		return nil, err
-	}
-
-	cc, err := grpc.Dial(u.Host, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(msgSize)))
-	if err != nil {
-		return nil, err
+		return nil, errors.WithStack(err)
 	}

 	ctx, cancel := context.WithCancel(s.server.Context())
diff --git a/server/region_syncer/server.go b/server/region_syncer/server.go
index 377c53fa7a1..796a4c2217c 100644
--- a/server/region_syncer/server.go
+++ b/server/region_syncer/server.go
@@ -22,7 +22,7 @@ import (
 	"github.com/juju/ratelimit"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
-	log "github.com/pingcap/log"
+	"github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
@@ -59,19 +59,21 @@ type Server interface {
 	GetStorage() *core.KV
 	Name() string
 	GetMetaRegions() []*metapb.Region
+	GetSecurityConfig() map[string]string
 }

 // RegionSyncer is used to sync the region information without raft.
 type RegionSyncer struct {
 	sync.RWMutex
-	streams map[string]ServerStream
-	ctx     context.Context
-	cancel  context.CancelFunc
-	server  Server
-	closed  chan struct{}
-	wg      sync.WaitGroup
-	history *historyBuffer
-	limit   *ratelimit.Bucket
+	streams        map[string]ServerStream
+	ctx            context.Context
+	cancel         context.CancelFunc
+	server         Server
+	closed         chan struct{}
+	wg             sync.WaitGroup
+	history        *historyBuffer
+	limit          *ratelimit.Bucket
+	securityConfig map[string]string
 }

 // NewRegionSyncer returns a region syncer.
@@ -81,11 +83,12 @@ type RegionSyncer struct {
 // no longer etcd but go-leveldb.
 func NewRegionSyncer(s Server) *RegionSyncer {
 	return &RegionSyncer{
-		streams: make(map[string]ServerStream),
-		server:  s,
-		closed:  make(chan struct{}),
-		history: newHistoryBuffer(defaultHistoryBufferSize, s.GetStorage().GetRegionKV()),
-		limit:   ratelimit.NewBucketWithRate(defaultBucketRate, defaultBucketCapacity),
+		streams:        make(map[string]ServerStream),
+		server:         s,
+		closed:         make(chan struct{}),
+		history:        newHistoryBuffer(defaultHistoryBufferSize, s.GetStorage().GetRegionKV()),
+		limit:          ratelimit.NewBucketWithRate(defaultBucketRate, defaultBucketCapacity),
+		securityConfig: s.GetSecurityConfig(),
 	}
 }
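One behavior change to double-check in the syncer: the old dial passed grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(msgSize)), which grpcutil.GetClientConn does not set, so the stream now runs with gRPC's default receive limit. If that limit matters, a variadic extension of the helper would preserve it; a hypothetical sketch, not part of this diff, with the TLS branch elided (it would match GetClientConn):

package grpcutil

import (
	"net/url"

	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

// GetClientConnWithOptions is a hypothetical variant that forwards extra
// dial options, so callers like the region syncer can keep their
// MaxCallRecvMsgSize setting.
func GetClientConnWithOptions(addr, caPath, certPath, keyPath string, extra ...grpc.DialOption) (*grpc.ClientConn, error) {
	opts := append([]grpc.DialOption{grpc.WithInsecure()}, extra...)
	u, err := url.Parse(addr)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return grpc.Dial(u.Host, opts...)
}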
diff --git a/server/schedule/filters.go b/server/schedule/filters.go
index 928110eccf8..71b673d6f63 100644
--- a/server/schedule/filters.go
+++ b/server/schedule/filters.go
@@ -25,6 +25,8 @@ import (

 // Filter is an interface to filter source and target store.
 type Filter interface {
+	// Scope is used to indicate where the filter will act on.
+	Scope() string
 	Type() string
 	// Return true if the store should not be used as a source store.
 	FilterSource(opt Options, store *core.StoreInfo) bool
@@ -38,7 +40,7 @@ func FilterSource(opt Options, store *core.StoreInfo, filters []Filter) bool {
 	storeID := fmt.Sprintf("%d", store.GetID())
 	for _, filter := range filters {
 		if filter.FilterSource(opt, store) {
-			filterCounter.WithLabelValues("filter-source", storeAddress, storeID, filter.Type()).Inc()
+			filterCounter.WithLabelValues("filter-source", storeAddress, storeID, filter.Scope(), filter.Type()).Inc()
 			return true
 		}
 	}
@@ -51,7 +53,7 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool {
 	storeID := fmt.Sprintf("%d", store.GetID())
 	for _, filter := range filters {
 		if filter.FilterTarget(opt, store) {
-			filterCounter.WithLabelValues("filter-target", storeAddress, storeID, filter.Type()).Inc()
+			filterCounter.WithLabelValues("filter-target", storeAddress, storeID, filter.Scope(), filter.Type()).Inc()
 			return true
 		}
 	}
@@ -59,18 +61,24 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool {
 }

 type excludedFilter struct {
+	scope   string
 	sources map[uint64]struct{}
 	targets map[uint64]struct{}
 }

 // NewExcludedFilter creates a Filter that filters all specified stores.
-func NewExcludedFilter(sources, targets map[uint64]struct{}) Filter {
+func NewExcludedFilter(scope string, sources, targets map[uint64]struct{}) Filter {
 	return &excludedFilter{
+		scope:   scope,
 		sources: sources,
 		targets: targets,
 	}
 }

+func (f *excludedFilter) Scope() string {
+	return f.scope
+}
+
 func (f *excludedFilter) Type() string {
 	return "exclude-filter"
 }
@@ -85,30 +93,15 @@ func (f *excludedFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
 	return ok
 }

-type blockFilter struct{}
-
-// NewBlockFilter creates a Filter that filters all stores that are blocked from balance.
-func NewBlockFilter() Filter {
-	return &blockFilter{}
-}
-
-func (f *blockFilter) Type() string {
-	return "block-filter"
-}
-
-func (f *blockFilter) FilterSource(opt Options, store *core.StoreInfo) bool {
-	return store.IsBlocked()
-}
+type overloadFilter struct{ scope string }

-func (f *blockFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
-	return store.IsBlocked()
+// NewOverloadFilter creates a Filter that filters all stores that are overloaded from balance.
+func NewOverloadFilter(scope string) Filter {
+	return &overloadFilter{scope: scope}
 }

-type overloadFilter struct{}
-
-// NewOverloadFilter creates a Filter that filters all stores that are overloaded from balance.
-func NewOverloadFilter() Filter {
-	return &overloadFilter{}
+func (f *overloadFilter) Scope() string {
+	return f.scope
 }

 func (f *overloadFilter) Type() string {
@@ -123,11 +116,15 @@ func (f *overloadFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
 	return store.IsOverloaded()
 }

-type stateFilter struct{}
+type stateFilter struct{ scope string }

 // NewStateFilter creates a Filter that filters all stores that are not UP.
-func NewStateFilter() Filter {
-	return &stateFilter{}
+func NewStateFilter(scope string) Filter {
+	return &stateFilter{scope: scope}
+}
+
+func (f *stateFilter) Scope() string {
+	return f.scope
 }

 func (f *stateFilter) Type() string {
@@ -142,11 +139,15 @@ func (f *stateFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
 	return !store.IsUp()
 }

-type healthFilter struct{}
+type healthFilter struct{ scope string }

 // NewHealthFilter creates a Filter that filters all stores that are Busy or Down.
-func NewHealthFilter() Filter {
-	return &healthFilter{}
+func NewHealthFilter(scope string) Filter {
+	return &healthFilter{scope: scope}
+}
+
+func (f *healthFilter) Scope() string {
+	return f.scope
 }

 func (f *healthFilter) Type() string {
@@ -168,31 +169,16 @@ func (f *healthFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
 	return f.filter(opt, store)
 }

-type disconnectFilter struct{}
-
-// NewDisconnectFilter creates a Filter that filters all stores that are disconnected.
-func NewDisconnectFilter() Filter {
-	return &disconnectFilter{}
-}
-
-func (f *disconnectFilter) Type() string {
-	return "disconnect-filter"
-}
-
-func (f *disconnectFilter) FilterSource(opt Options, store *core.StoreInfo) bool {
-	return store.IsDisconnected()
-}
-
-func (f *disconnectFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
-	return store.IsDisconnected()
-}
-
-type pendingPeerCountFilter struct{}
+type pendingPeerCountFilter struct{ scope string }

 // NewPendingPeerCountFilter creates a Filter that filters all stores that are
 // currently handling too many pending peers.
-func NewPendingPeerCountFilter() Filter {
-	return &pendingPeerCountFilter{}
+func NewPendingPeerCountFilter(scope string) Filter {
+	return &pendingPeerCountFilter{scope: scope}
+}
+
+func (p *pendingPeerCountFilter) Scope() string {
+	return p.scope
 }

 func (p *pendingPeerCountFilter) Type() string {
@@ -214,12 +200,16 @@ func (p *pendingPeerCountFilter) FilterTarget(opt Options, store *core.StoreInfo
 	return p.filter(opt, store)
 }

-type snapshotCountFilter struct{}
+type snapshotCountFilter struct{ scope string }

 // NewSnapshotCountFilter creates a Filter that filters all stores that are
 // currently handling too many snapshots.
-func NewSnapshotCountFilter() Filter {
-	return &snapshotCountFilter{}
+func NewSnapshotCountFilter(scope string) Filter {
+	return &snapshotCountFilter{scope: scope}
+}
+
+func (f *snapshotCountFilter) Scope() string {
+	return f.scope
 }

 func (f *snapshotCountFilter) Type() string {
@@ -241,12 +231,17 @@ func (f *snapshotCountFilter) FilterTarget(opt Options, store *core.StoreInfo) b
 }

 type cacheFilter struct {
+	scope string
 	cache *cache.TTLUint64
 }

 // NewCacheFilter creates a Filter that filters all stores that are in the cache.
-func NewCacheFilter(cache *cache.TTLUint64) Filter {
-	return &cacheFilter{cache: cache}
+func NewCacheFilter(scope string, cache *cache.TTLUint64) Filter {
+	return &cacheFilter{scope: scope, cache: cache}
+}
+
+func (f *cacheFilter) Scope() string {
+	return f.scope
 }

 func (f *cacheFilter) Type() string {
@@ -261,12 +256,16 @@ func (f *cacheFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
 	return false
 }

-type storageThresholdFilter struct{}
+type storageThresholdFilter struct{ scope string }

 // NewStorageThresholdFilter creates a Filter that filters all stores that are
 // almost full.
-func NewStorageThresholdFilter() Filter {
-	return &storageThresholdFilter{}
+func NewStorageThresholdFilter(scope string) Filter {
+	return &storageThresholdFilter{scope: scope}
+}
+
+func (f *storageThresholdFilter) Scope() string {
+	return f.scope
 }

 func (f *storageThresholdFilter) Type() string {
@@ -283,6 +282,7 @@ func (f *storageThresholdFilter) FilterTarget(opt Options, store *core.StoreInfo

 // distinctScoreFilter ensures that distinct score will not decrease.
 type distinctScoreFilter struct {
+	scope     string
 	labels    []string
 	stores    []*core.StoreInfo
 	safeScore float64
@@ -290,7 +290,7 @@ type distinctScoreFilter struct {

 // NewDistinctScoreFilter creates a filter that filters all stores that have
 // lower distinct score than specified store.
-func NewDistinctScoreFilter(labels []string, stores []*core.StoreInfo, source *core.StoreInfo) Filter {
+func NewDistinctScoreFilter(scope string, labels []string, stores []*core.StoreInfo, source *core.StoreInfo) Filter {
 	newStores := make([]*core.StoreInfo, 0, len(stores)-1)
 	for _, s := range stores {
 		if s.GetID() == source.GetID() {
@@ -300,12 +300,17 @@ func NewDistinctScoreFilter(scope string, labels []string, stores []*core.StoreInfo, source *c
 	}

 	return &distinctScoreFilter{
+		scope:     scope,
 		labels:    labels,
 		stores:    newStores,
 		safeScore: DistinctScore(labels, newStores, source),
 	}
 }

+func (f *distinctScoreFilter) Scope() string {
+	return f.scope
+}
+
 func (f *distinctScoreFilter) Type() string {
 	return "distinct-filter"
 }
@@ -319,19 +324,25 @@ func (f *distinctScoreFilter) FilterTarget(opt Options, store *core.StoreInfo) b
 }

 type namespaceFilter struct {
+	scope      string
 	classifier namespace.Classifier
 	namespace  string
 }

 // NewNamespaceFilter creates a Filter that filters all stores that do not
 // belong to a namespace.
-func NewNamespaceFilter(classifier namespace.Classifier, namespace string) Filter {
+func NewNamespaceFilter(scope string, classifier namespace.Classifier, namespace string) Filter {
 	return &namespaceFilter{
+		scope:      scope,
 		classifier: classifier,
 		namespace:  namespace,
 	}
 }

+func (f *namespaceFilter) Scope() string {
+	return f.scope
+}
+
 func (f *namespaceFilter) Type() string {
 	return "namespace-filter"
 }
@@ -348,35 +359,21 @@ func (f *namespaceFilter) FilterTarget(opt Options, store *core.StoreInfo) bool
 	return f.filter(store)
 }

-type rejectLeaderFilter struct{}
-
-// NewRejectLeaderFilter creates a Filter that filters stores that marked as
-// rejectLeader from being the target of leader transfer.
-func NewRejectLeaderFilter() Filter {
-	return rejectLeaderFilter{}
-}
-
-func (f rejectLeaderFilter) Type() string {
-	return "reject-leader-filter"
-}
-
-func (f rejectLeaderFilter) FilterSource(opt Options, store *core.StoreInfo) bool {
-	return false
-}
-
-func (f rejectLeaderFilter) FilterTarget(opt Options, store *core.StoreInfo) bool {
-	return opt.CheckLabelProperty(RejectLeader, store.GetLabels())
-}
-
 // StoreStateFilter is used to determine whether a store can be selected as the
 // source or target of the schedule based on the store's state.
 type StoreStateFilter struct {
+	ActionScope string
 	// Set true if the schedule involves any transfer leader operation.
 	TransferLeader bool
 	// Set true if the schedule involves any move region operation.
 	MoveRegion bool
 }

+// Scope returns the scheduler or the checker which the filter acts on.
+func (f StoreStateFilter) Scope() string {
+	return f.ActionScope
+}
+
 // Type returns the type of the Filter.
 func (f StoreStateFilter) Type() string {
 	return "store-state-filter"
diff --git a/server/schedule/filters_test.go b/server/schedule/filters_test.go
index 742e37f07a9..bcedfde8df3 100644
--- a/server/schedule/filters_test.go
+++ b/server/schedule/filters_test.go
@@ -25,7 +25,7 @@ var _ = Suite(&testFiltersSuite{})
 type testFiltersSuite struct{}

 func (s *testReplicationSuite) TestPendingPeerFilter(c *C) {
-	filter := NewPendingPeerCountFilter()
+	filter := NewPendingPeerCountFilter("")
 	opt := mockoption.NewScheduleOptions()
 	tc := mockcluster.NewCluster(opt)
 	store := core.NewStoreInfo(&metapb.Store{Id: 1})
diff --git a/server/schedule/metrics.go b/server/schedule/metrics.go
index 6154dc5bae4..71166c7de68 100644
--- a/server/schedule/metrics.go
+++ b/server/schedule/metrics.go
@@ -39,7 +39,7 @@ var (
 			Subsystem: "schedule",
 			Name:      "filter",
 			Help:      "Counter of the filter",
-		}, []string{"action", "address", "store", "type"})
+		}, []string{"action", "address", "store", "scope", "type"})

 	operatorCounter = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
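With Scope() on the interface and the extra "scope" label on filterCounter, every store rejection can now be attributed to the scheduler or checker that triggered it. For anyone adding a filter after this change, the required shape, shown as a hypothetical no-op filter in package schedule:

// alwaysAllowFilter shows the minimal implementation surface after this
// change: Scope() now accompanies Type() and the two Filter* predicates.
type alwaysAllowFilter struct{ scope string }

func NewAlwaysAllowFilter(scope string) Filter { return &alwaysAllowFilter{scope: scope} }

func (f *alwaysAllowFilter) Scope() string { return f.scope }
func (f *alwaysAllowFilter) Type() string  { return "always-allow-filter" }

func (f *alwaysAllowFilter) FilterSource(opt Options, store *core.StoreInfo) bool { return false }
func (f *alwaysAllowFilter) FilterTarget(opt Options, store *core.StoreInfo) bool { return false }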
diff --git a/server/schedule/namespace_checker.go b/server/schedule/namespace_checker.go
index 46c836f454a..b6a8ad38c37 100644
--- a/server/schedule/namespace_checker.go
+++ b/server/schedule/namespace_checker.go
@@ -21,8 +21,11 @@ import (
 	"go.uber.org/zap"
 )

+const namespaceCheckerName = "namespace-checker"
+
 // NamespaceChecker ensures regions go to the right place.
 type NamespaceChecker struct {
+	name       string
 	cluster    Cluster
 	filters    []Filter
 	classifier namespace.Classifier
@@ -31,10 +34,11 @@ type NamespaceChecker struct {
 // NewNamespaceChecker creates a namespace checker.
 func NewNamespaceChecker(cluster Cluster, classifier namespace.Classifier) *NamespaceChecker {
 	filters := []Filter{
-		StoreStateFilter{MoveRegion: true},
+		StoreStateFilter{ActionScope: namespaceCheckerName, MoveRegion: true},
 	}

 	return &NamespaceChecker{
+		name:       namespaceCheckerName,
 		cluster:    cluster,
 		filters:    filters,
 		classifier: classifier,
@@ -102,7 +106,7 @@ func (n *NamespaceChecker) SelectBestPeerToRelocate(region *core.RegionInfo, tar
 // SelectBestStoreToRelocate randomly returns the store to relocate
 func (n *NamespaceChecker) SelectBestStoreToRelocate(region *core.RegionInfo, targets []*core.StoreInfo) uint64 {
 	selector := NewRandomSelector(n.filters)
-	target := selector.SelectTarget(n.cluster, targets, NewExcludedFilter(nil, region.GetStoreIds()))
+	target := selector.SelectTarget(n.cluster, targets, NewExcludedFilter(n.name, nil, region.GetStoreIds()))
 	if target == nil {
 		return 0
 	}
@@ -120,7 +124,7 @@ func (n *NamespaceChecker) isExists(stores []*core.StoreInfo, storeID uint64) bo

 func (n *NamespaceChecker) getNamespaceStores(region *core.RegionInfo) []*core.StoreInfo {
 	ns := n.classifier.GetRegionNamespace(region)
-	filteredStores := n.filter(n.cluster.GetStores(), NewNamespaceFilter(n.classifier, ns))
+	filteredStores := n.filter(n.cluster.GetStores(), NewNamespaceFilter(n.name, n.classifier, ns))

 	return filteredStores
 }
diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go
index ea3d22fe308..b53e8177178 100644
--- a/server/schedule/region_scatterer.go
+++ b/server/schedule/region_scatterer.go
@@ -23,6 +23,8 @@ import (
 	"github.com/pkg/errors"
 )

+const regionScatterName = "region-scatter"
+
 type selectedStores struct {
 	mu     sync.Mutex
 	stores map[uint64]struct{}
@@ -50,18 +52,19 @@ func (s *selectedStores) reset() {
 	s.stores = make(map[uint64]struct{})
 }

-func (s *selectedStores) newFilter() Filter {
+func (s *selectedStores) newFilter(scope string) Filter {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	cloned := make(map[uint64]struct{})
 	for id := range s.stores {
 		cloned[id] = struct{}{}
 	}
-	return NewExcludedFilter(nil, cloned)
+	return NewExcludedFilter(scope, nil, cloned)
 }

 // RegionScatterer scatters regions.
 type RegionScatterer struct {
+	name       string
 	cluster    Cluster
 	classifier namespace.Classifier
 	filters    []Filter
@@ -72,10 +75,11 @@ type RegionScatterer struct {
 // RegionScatter is used for the `Lightning`; it scatters the specified regions before importing data.
 func NewRegionScatterer(cluster Cluster, classifier namespace.Classifier) *RegionScatterer {
 	return &RegionScatterer{
+		name:       regionScatterName,
 		cluster:    cluster,
 		classifier: classifier,
 		filters: []Filter{
-			StoreStateFilter{},
+			StoreStateFilter{ActionScope: regionScatterName},
 		},
 		selected: newSelectedStores(),
 	}
@@ -140,7 +144,7 @@ func (r *RegionScatterer) selectPeerToReplace(stores map[uint64]*core.StoreInfo,
 	// scoreGuard guarantees that the distinct score will not decrease.
 	regionStores := r.cluster.GetRegionStores(region)
 	sourceStore := r.cluster.GetStore(oldPeer.GetStoreId())
-	scoreGuard := NewDistinctScoreFilter(r.cluster.GetLocationLabels(), regionStores, sourceStore)
+	scoreGuard := NewDistinctScoreFilter(r.name, r.cluster.GetLocationLabels(), regionStores, sourceStore)

 	candidates := make([]*core.StoreInfo, 0, len(stores))
 	for _, store := range stores {
@@ -165,9 +169,9 @@ func (r *RegionScatterer) selectPeerToReplace(stores map[uint64]*core.StoreInfo,
 func (r *RegionScatterer) collectAvailableStores(region *core.RegionInfo) map[uint64]*core.StoreInfo {
 	namespace := r.classifier.GetRegionNamespace(region)
 	filters := []Filter{
-		r.selected.newFilter(),
-		NewExcludedFilter(nil, region.GetStoreIds()),
-		NewNamespaceFilter(r.classifier, namespace),
+		r.selected.newFilter(r.name),
+		NewExcludedFilter(r.name, nil, region.GetStoreIds()),
+		NewNamespaceFilter(r.name, r.classifier, namespace),
 	}
 	filters = append(filters, r.filters...)
diff --git a/server/schedule/replica_checker.go b/server/schedule/replica_checker.go
index f81e64d4f65..779f0abb0c1 100644
--- a/server/schedule/replica_checker.go
+++ b/server/schedule/replica_checker.go
@@ -23,26 +23,35 @@ import (
 	"go.uber.org/zap"
 )

+const replicaCheckerName = "replica-checker"
+
 // ReplicaChecker ensures region has the best replicas.
 // Including the following:
 // Replica number management.
 // Unhealthy replica management, mainly used for disaster recovery of TiKV.
 // Location management, mainly used for cross data center deployment.
 type ReplicaChecker struct {
+	name       string
 	cluster    Cluster
 	classifier namespace.Classifier
 	filters    []Filter
 }

 // NewReplicaChecker creates a replica checker.
-func NewReplicaChecker(cluster Cluster, classifier namespace.Classifier) *ReplicaChecker {
+func NewReplicaChecker(cluster Cluster, classifier namespace.Classifier, n ...string) *ReplicaChecker {
+	name := replicaCheckerName
+	if len(n) != 0 {
+		name = n[0]
+	}
 	filters := []Filter{
-		NewOverloadFilter(),
-		NewHealthFilter(),
-		NewSnapshotCountFilter(),
+		NewOverloadFilter(name),
+		NewHealthFilter(name),
+		NewSnapshotCountFilter(name),
+		NewPendingPeerCountFilter(name),
 	}

 	return &ReplicaChecker{
+		name:       name,
 		cluster:    cluster,
 		classifier: classifier,
 		filters:    filters,
@@ -65,7 +74,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {
 	if len(region.GetPeers()) < r.cluster.GetMaxReplicas() && r.cluster.IsMakeUpReplicaEnabled() {
 		log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers())))
-		newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter())
+		newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter(r.name))
 		if newPeer == nil {
 			checkerCounter.WithLabelValues("replica_checker", "no_target_store").Inc()
 			return nil
@@ -97,7 +106,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {

 // SelectBestReplacementStore returns a store id to be used to replace the old peer, and the distinct score.
 func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, oldPeer *metapb.Peer, filters ...Filter) (uint64, float64) {
-	filters = append(filters, NewExcludedFilter(nil, region.GetStoreIds()))
+	filters = append(filters, NewExcludedFilter(r.name, nil, region.GetStoreIds()))
 	newRegion := region.Clone(core.WithRemoveStorePeer(oldPeer.GetStoreId()))
 	return r.selectBestStoreToAddReplica(newRegion, filters...)
 }
@@ -120,14 +129,14 @@ func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, fil
 func (r *ReplicaChecker) selectBestStoreToAddReplica(region *core.RegionInfo, filters ...Filter) (uint64, float64) {
 	// Add some must have filters.
 	newFilters := []Filter{
-		NewStateFilter(),
-		NewPendingPeerCountFilter(),
-		NewExcludedFilter(nil, region.GetStoreIds()),
+		NewStateFilter(r.name),
+		NewPendingPeerCountFilter(r.name),
+		NewExcludedFilter(r.name, nil, region.GetStoreIds()),
 	}
 	filters = append(filters, r.filters...)
 	filters = append(filters, newFilters...)
 	if r.classifier != nil {
-		filters = append(filters, NewNamespaceFilter(r.classifier, r.classifier.GetRegionNamespace(region)))
+		filters = append(filters, NewNamespaceFilter(r.name, r.classifier, r.classifier.GetRegionNamespace(region)))
 	}
 	regionStores := r.cluster.GetRegionStores(region)
 	selector := NewReplicaSelector(regionStores, r.cluster.GetLocationLabels(), r.filters...)
@@ -213,7 +222,7 @@ func (r *ReplicaChecker) checkBestReplacement(region *core.RegionInfo) *Operator
 		checkerCounter.WithLabelValues("replica_checker", "all_right").Inc()
 		return nil
 	}
-	storeID, newScore := r.SelectBestReplacementStore(region, oldPeer, NewStorageThresholdFilter())
+	storeID, newScore := r.SelectBestReplacementStore(region, oldPeer, NewStorageThresholdFilter(r.name))
 	if storeID == 0 {
 		checkerCounter.WithLabelValues("replica_checker", "no_replacement_store").Inc()
 		return nil
@@ -263,7 +272,7 @@ func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, sta
 		return op
 	}

-	storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter())
+	storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter(r.name))
 	if storeID == 0 {
 		log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID()))
 		return nil
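The variadic tail on NewReplicaChecker keeps the existing two-argument call sites compiling while letting a scheduler re-brand the checker's filters under its own scope; balance_region.go does exactly that further below. A usage sketch (cluster and classifier are whatever the caller already holds):

func newCheckers(cluster schedule.Cluster, classifier namespace.Classifier) (*schedule.ReplicaChecker, *schedule.ReplicaChecker) {
	// Default scope: rejections are counted under "replica-checker".
	def := schedule.NewReplicaChecker(cluster, classifier)
	// Scoped: attribute rejections to the calling scheduler instead,
	// as balance_region.go does in this diff.
	scoped := schedule.NewReplicaChecker(cluster, nil, "balance-region-scheduler")
	return def, scoped
}

An optional variadic string is lighter than touching every caller, though an explicit name parameter (or an option type) would make the contract clearer.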
diff --git a/server/schedulers/adjacent_region.go b/server/schedulers/adjacent_region.go
index 53fc2850f4a..0e84afa07e6 100644
--- a/server/schedulers/adjacent_region.go
+++ b/server/schedulers/adjacent_region.go
@@ -31,6 +31,7 @@ const (
 	defaultAdjacentLeaderLimit   = 64
 	minAdjacentSchedulerInterval = time.Second
 	maxAdjacentSchedulerInterval = 30 * time.Second
+	balanceAdjacentRegionName    = "balance-adjacent-region-scheduler"
 )

 func init() {
@@ -64,6 +65,7 @@ func init() {
 // 2. the two regions' leaders will not be in the public store of these two regions
 type balanceAdjacentRegionScheduler struct {
 	*baseScheduler
+	name        string
 	selector    *schedule.RandomSelector
 	leaderLimit uint64
 	peerLimit   uint64
@@ -92,11 +94,12 @@ func (a *adjacentState) len() int {
 // on each store.
 func newBalanceAdjacentRegionScheduler(opController *schedule.OperatorController, args ...uint64) schedule.Scheduler {
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{TransferLeader: true, MoveRegion: true},
+		schedule.StoreStateFilter{ActionScope: balanceAdjacentRegionName, TransferLeader: true, MoveRegion: true},
 	}
 	base := newBaseScheduler(opController)
 	s := &balanceAdjacentRegionScheduler{
 		baseScheduler: base,
+		name:          balanceAdjacentRegionName,
 		selector:      schedule.NewRandomSelector(filters),
 		leaderLimit:   defaultAdjacentLeaderLimit,
 		peerLimit:     defaultAdjacentPeerLimit,
@@ -113,7 +116,7 @@ func newBalanceAdjacentRegionScheduler(opController *schedule.OperatorController
 }

 func (l *balanceAdjacentRegionScheduler) GetName() string {
-	return "balance-adjacent-region-scheduler"
+	return l.name
 }

 func (l *balanceAdjacentRegionScheduler) GetType() string {
@@ -287,7 +290,7 @@ func (l *balanceAdjacentRegionScheduler) dispersePeer(cluster schedule.Cluster,
 		return nil
 	}

-	scoreGuard := schedule.NewDistinctScoreFilter(cluster.GetLocationLabels(), stores, source)
+	scoreGuard := schedule.NewDistinctScoreFilter(l.GetName(), cluster.GetLocationLabels(), stores, source)
 	excludeStores := region.GetStoreIds()
 	for _, storeID := range l.cacheRegions.assignedStoreIds {
 		if _, ok := excludeStores[storeID]; !ok {
@@ -296,7 +299,7 @@ func (l *balanceAdjacentRegionScheduler) dispersePeer(cluster schedule.Cluster,
 	}

 	filters := []schedule.Filter{
-		schedule.NewExcludedFilter(nil, excludeStores),
+		schedule.NewExcludedFilter(l.GetName(), nil, excludeStores),
 		scoreGuard,
 	}
 	target := l.selector.SelectTarget(cluster, cluster.GetStores(), filters...)
diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go
index 5ba1ea79164..36655036e32 100644
--- a/server/schedulers/balance_leader.go
+++ b/server/schedulers/balance_leader.go
@@ -46,15 +46,10 @@ type balanceLeaderScheduler struct {
 // each store balanced.
 func newBalanceLeaderScheduler(opController *schedule.OperatorController, opts ...BalanceLeaderCreateOption) schedule.Scheduler {
 	taintStores := newTaintCache()
-	filters := []schedule.Filter{
-		schedule.StoreStateFilter{TransferLeader: true},
-		schedule.NewCacheFilter(taintStores),
-	}

 	base := newBaseScheduler(opController)
 	s := &balanceLeaderScheduler{
 		baseScheduler: base,
-		selector:      schedule.NewBalanceSelector(core.LeaderKind, filters),
 		taintStores:   taintStores,
 		opController:  opController,
 		counter:       balanceLeaderCounter,
 	}
 	for _, opt := range opts {
 		opt(s)
 	}
+	filters := []schedule.Filter{
+		schedule.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true},
+		schedule.NewCacheFilter(s.GetName(), taintStores),
+	}
+	s.selector = schedule.NewBalanceSelector(core.LeaderKind, filters)
 	return s
 }

@@ -218,8 +218,9 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source,
 	schedulerCounter.WithLabelValues(l.GetName(), "new_operator").Inc()
 	sourceLabel := strconv.FormatUint(sourceID, 10)
 	targetLabel := strconv.FormatUint(targetID, 10)
-	l.counter.WithLabelValues("move_leader", source.GetAddress()+"-out", sourceLabel).Inc()
-	l.counter.WithLabelValues("move_leader", target.GetAddress()+"-in", targetLabel).Inc()
+	l.counter.WithLabelValues("move-leader", source.GetAddress()+"-out", sourceLabel).Inc()
+	l.counter.WithLabelValues("move-leader", target.GetAddress()+"-in", targetLabel).Inc()
+	balanceDirectionCounter.WithLabelValues(l.GetName(), sourceLabel, targetLabel).Inc()
 	op := schedule.CreateTransferLeaderOperator("balance-leader", region, region.GetLeader().GetStoreId(), targetID, schedule.OpBalance)
 	return []*schedule.Operator{op}
 }
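Moving the filter construction below the options loop matters because an option may rename the scheduler, and StoreStateFilter bakes GetName() in at construction time; building the filters first would freeze the default name into the scope label. The same reordering appears in balance_region.go right below. A condensed, hypothetical illustration of the hazard being fixed:

type namedScheduler struct{ name string }

func (s *namedScheduler) GetName() string { return s.name }

func newNamedScheduler(opts ...func(*namedScheduler)) *namedScheduler {
	s := &namedScheduler{name: "balance-leader-scheduler"}
	for _, opt := range opts {
		opt(s) // an option may replace s.name
	}
	scope := s.GetName() // computed after the loop, so the final name is captured
	_ = scope
	return s
}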
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index c6360da07dd..43c4ccb6a91 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -47,21 +47,21 @@ type balanceRegionScheduler struct {
 // each store balanced.
 func newBalanceRegionScheduler(opController *schedule.OperatorController, opts ...BalanceRegionCreateOption) schedule.Scheduler {
 	taintStores := newTaintCache()
-	filters := []schedule.Filter{
-		schedule.StoreStateFilter{MoveRegion: true},
-		schedule.NewCacheFilter(taintStores),
-	}
 	base := newBaseScheduler(opController)
 	s := &balanceRegionScheduler{
 		baseScheduler: base,
-		selector:      schedule.NewBalanceSelector(core.RegionKind, filters),
-		taintStores:   taintStores,
 		opController:  opController,
 		counter:       balanceRegionCounter,
+		taintStores:   taintStores,
 	}
 	for _, opt := range opts {
 		opt(s)
 	}
+	filters := []schedule.Filter{
+		schedule.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
+		schedule.NewCacheFilter(s.GetName(), taintStores),
+	}
+	s.selector = schedule.NewBalanceSelector(core.RegionKind, filters)
 	return s
 }

@@ -179,9 +179,9 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 	// scoreGuard guarantees that the distinct score will not decrease.
 	stores := cluster.GetRegionStores(region)
 	source := cluster.GetStore(oldPeer.GetStoreId())
-	scoreGuard := schedule.NewDistinctScoreFilter(cluster.GetLocationLabels(), stores, source)
+	scoreGuard := schedule.NewDistinctScoreFilter(s.GetName(), cluster.GetLocationLabels(), stores, source)

-	checker := schedule.NewReplicaChecker(cluster, nil)
+	checker := schedule.NewReplicaChecker(cluster, nil, s.GetName())
 	storeID, _ := checker.SelectBestReplacementStore(region, oldPeer, scoreGuard)
 	if storeID == 0 {
 		schedulerCounter.WithLabelValues(s.GetName(), "no_replacement").Inc()
@@ -218,9 +218,9 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 	}
 	sourceLabel := strconv.FormatUint(sourceID, 10)
 	targetLabel := strconv.FormatUint(targetID, 10)
-	s.counter.WithLabelValues("move_peer", source.GetAddress()+"-out", sourceLabel).Inc()
-	s.counter.WithLabelValues("move_peer", target.GetAddress()+"-in", targetLabel).Inc()
-	s.counter.WithLabelValues("direction", "from_to", sourceLabel+"-"+targetLabel).Inc()
+	s.counter.WithLabelValues("move-peer", source.GetAddress()+"-out", sourceLabel).Inc()
+	s.counter.WithLabelValues("move-peer", target.GetAddress()+"-in", targetLabel).Inc()
+	balanceDirectionCounter.WithLabelValues(s.GetName(), sourceLabel, targetLabel).Inc()
 	return op
 }

@@ -231,8 +231,8 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 // which may recover soon.
 func (s *balanceRegionScheduler) hasPotentialTarget(cluster schedule.Cluster, region *core.RegionInfo, source *core.StoreInfo, opInfluence schedule.OpInfluence) bool {
 	filters := []schedule.Filter{
-		schedule.NewExcludedFilter(nil, region.GetStoreIds()),
-		schedule.NewDistinctScoreFilter(cluster.GetLocationLabels(), cluster.GetRegionStores(region), source),
+		schedule.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds()),
+		schedule.NewDistinctScoreFilter(s.GetName(), cluster.GetLocationLabels(), cluster.GetRegionStores(region), source),
 	}

 	for _, store := range cluster.GetStores() {
diff --git a/server/schedulers/evict_leader.go b/server/schedulers/evict_leader.go
index 2ae7de7838e..7ef68013e28 100644
--- a/server/schedulers/evict_leader.go
+++ b/server/schedulers/evict_leader.go
@@ -45,13 +45,14 @@ type evictLeaderScheduler struct {
 // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders
 // out of a store.
 func newEvictLeaderScheduler(opController *schedule.OperatorController, storeID uint64) schedule.Scheduler {
+	name := fmt.Sprintf("evict-leader-scheduler-%d", storeID)
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{TransferLeader: true},
+		schedule.StoreStateFilter{ActionScope: name, TransferLeader: true},
 	}
 	base := newBaseScheduler(opController)
 	return &evictLeaderScheduler{
 		baseScheduler: base,
-		name:          fmt.Sprintf("evict-leader-scheduler-%d", storeID),
+		name:          name,
 		storeID:       storeID,
 		selector:      schedule.NewRandomSelector(filters),
 	}
diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go
index f6163f72f61..2dc930fdd28 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_region.go
@@ -44,6 +44,7 @@ const (
 	hotRegionLimitFactor      = 0.75
 	storeHotRegionsDefaultLen = 100
 	hotRegionScheduleFactor   = 0.9
+	balanceHotRegionName      = "balance-hot-region-scheduler"
 )

 // BalanceType : the perspective of balance
@@ -69,6 +70,7 @@ func newStoreStaticstics() *storeStatistics {
 }

 type balanceHotRegionsScheduler struct {
+	name string
 	*baseScheduler
 	sync.RWMutex
 	leaderLimit uint64
@@ -83,6 +85,7 @@ type balanceHotRegionsScheduler struct {
 func newBalanceHotRegionsScheduler(opController *schedule.OperatorController) *balanceHotRegionsScheduler {
 	base := newBaseScheduler(opController)
 	return &balanceHotRegionsScheduler{
+		name:          balanceHotRegionName,
 		baseScheduler: base,
 		leaderLimit:   1,
 		peerLimit:     1,
@@ -117,7 +120,7 @@ func newBalanceHotWriteRegionsScheduler(opController *schedule.OperatorControlle
 }

 func (h *balanceHotRegionsScheduler) GetName() string {
-	return "balance-hot-region-scheduler"
+	return h.name
 }

 func (h *balanceHotRegionsScheduler) GetType() string {
@@ -305,10 +308,13 @@ func (h *balanceHotRegionsScheduler) balanceByPeer(cluster schedule.Cluster, sto
 	}

 	srcStore := cluster.GetStore(srcStoreID)
+	if srcStore == nil {
+		log.Error("failed to get the source store", zap.Uint64("store-id", srcStoreID))
+	}
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{MoveRegion: true},
-		schedule.NewExcludedFilter(srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
-		schedule.NewDistinctScoreFilter(cluster.GetLocationLabels(), cluster.GetRegionStores(srcRegion), srcStore),
+		schedule.StoreStateFilter{ActionScope: h.GetName(), MoveRegion: true},
+		schedule.NewExcludedFilter(h.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
+		schedule.NewDistinctScoreFilter(h.GetName(), cluster.GetLocationLabels(), cluster.GetRegionStores(srcRegion), srcStore),
 	}
 	candidateStoreIDs := make([]uint64, 0, len(stores))
 	for _, store := range stores {
@@ -367,7 +373,7 @@ func (h *balanceHotRegionsScheduler) balanceByLeader(cluster schedule.Cluster, s
 		continue
 	}

-	filters := []schedule.Filter{schedule.StoreStateFilter{TransferLeader: true}}
+	filters := []schedule.Filter{schedule.StoreStateFilter{ActionScope: h.GetName(), TransferLeader: true}}
 	candidateStoreIDs := make([]uint64, 0, len(srcRegion.GetPeers())-1)
 	for _, store := range cluster.GetFollowerStores(srcRegion) {
 		if !schedule.FilterTarget(cluster, store, filters) {
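A reviewer caution on the two new nil checks (here and in shuffle_hot_region.go below): they log the missing source store but fall through, and srcStore is then handed to schedule.NewDistinctScoreFilter, which reads source.GetID(). Unless StoreInfo methods are nil-safe, bailing out early may be wanted; a hedged sketch, where the zero-value return is an assumption about balanceByPeer's result shape:

srcStore := cluster.GetStore(srcStoreID)
if srcStore == nil {
	log.Error("failed to get the source store", zap.Uint64("store-id", srcStoreID))
	return nil, nil, nil // assumed signature; avoid dereferencing a nil store below
}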
diff --git a/server/schedulers/label.go b/server/schedulers/label.go
index 90066a0fc72..f07f793ed00 100644
--- a/server/schedulers/label.go
+++ b/server/schedulers/label.go
@@ -26,7 +26,10 @@ func init() {
 	})
 }
 
+const labelSchedulerName = "label-scheduler"
+
 type labelScheduler struct {
+	name string
 	*baseScheduler
 	selector *schedule.BalanceSelector
 }
@@ -36,16 +39,17 @@ type labelScheduler struct {
 // the store with the specific label.
 func newLabelScheduler(opController *schedule.OperatorController) schedule.Scheduler {
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{TransferLeader: true},
+		schedule.StoreStateFilter{ActionScope: labelSchedulerName, TransferLeader: true},
 	}
 	return &labelScheduler{
+		name:          labelSchedulerName,
 		baseScheduler: newBaseScheduler(opController),
 		selector:      schedule.NewBalanceSelector(core.LeaderKind, filters),
 	}
 }
 
 func (s *labelScheduler) GetName() string {
-	return "label-scheduler"
+	return s.name
 }
 
 func (s *labelScheduler) GetType() string {
@@ -80,8 +84,8 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster) []*schedule.Operator
 	for _, p := range region.GetPendingPeers() {
 		excludeStores[p.GetStoreId()] = struct{}{}
 	}
-	filter := schedule.NewExcludedFilter(nil, excludeStores)
-	target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), filter)
+	f := schedule.NewExcludedFilter(s.GetName(), nil, excludeStores)
+	target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), f)
 	if target == nil {
 		log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID()))
 		schedulerCounter.WithLabelValues(s.GetName(), "no_target").Inc()
diff --git a/server/schedulers/metrics.go b/server/schedulers/metrics.go
index 1bae69ec643..c9e649fab22 100644
--- a/server/schedulers/metrics.go
+++ b/server/schedulers/metrics.go
@@ -47,6 +47,14 @@ var balanceRegionCounter = prometheus.NewCounterVec(
 		Help: "Counter of balance region scheduler.",
 	}, []string{"type", "address", "store"})
 
+var balanceDirectionCounter = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Namespace: "pd",
+		Subsystem: "scheduler",
+		Name:      "balance_direction",
+		Help:      "Counter of direction of balance related schedulers.",
+	}, []string{"type", "source", "target"})
+
 var scatterRangeLeaderCounter = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Namespace: "pd",
@@ -68,6 +76,7 @@ func init() {
 	prometheus.MustRegister(schedulerStatus)
 	prometheus.MustRegister(balanceLeaderCounter)
 	prometheus.MustRegister(balanceRegionCounter)
+	prometheus.MustRegister(balanceDirectionCounter)
 	prometheus.MustRegister(scatterRangeLeaderCounter)
 	prometheus.MustRegister(scatterRangeRegionCounter)
 }
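
balanceDirectionCounter is a standard client_golang CounterVec: once registered, it is incremented with one label value per declared label, producing series such as pd_scheduler_balance_direction{type="balance-region-scheduler",source="1",target="4"}. A self-contained usage sketch (the literal label values below are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

var balanceDirectionCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "pd",
		Subsystem: "scheduler",
		Name:      "balance_direction",
		Help:      "Counter of direction of balance related schedulers.",
	}, []string{"type", "source", "target"})

func main() {
	prometheus.MustRegister(balanceDirectionCounter)
	// One increment per scheduled move, labelled with the scheduler
	// name and the numeric source/target store IDs.
	balanceDirectionCounter.WithLabelValues("balance-region-scheduler", "1", "4").Inc()
}

Collapsing the old per-scheduler "direction" label into one shared vector keyed by scheduler name keeps the cardinality predictable and lets one dashboard query cover every balance-style scheduler.
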
diff --git a/server/schedulers/random_merge.go b/server/schedulers/random_merge.go
index 0d0b161ef48..7b1af2d8573 100644
--- a/server/schedulers/random_merge.go
+++ b/server/schedulers/random_merge.go
@@ -26,7 +26,10 @@ func init() {
 	})
 }
 
+const randomMergeName = "random-merge-scheduler"
+
 type randomMergeScheduler struct {
+	name string
 	*baseScheduler
 	selector *schedule.RandomSelector
 }
@@ -35,17 +38,18 @@ type randomMergeScheduler struct {
 // then merges them.
 func newRandomMergeScheduler(opController *schedule.OperatorController) schedule.Scheduler {
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{MoveRegion: true},
+		schedule.StoreStateFilter{ActionScope: randomMergeName, MoveRegion: true},
 	}
 	base := newBaseScheduler(opController)
 	return &randomMergeScheduler{
+		name:          randomMergeName,
 		baseScheduler: base,
 		selector:      schedule.NewRandomSelector(filters),
 	}
 }
 
 func (s *randomMergeScheduler) GetName() string {
-	return "random-merge-scheduler"
+	return s.name
 }
 
 func (s *randomMergeScheduler) GetType() string {
diff --git a/server/schedulers/shuffle_hot_region.go b/server/schedulers/shuffle_hot_region.go
index f6a64e7a056..062a7f96331 100644
--- a/server/schedulers/shuffle_hot_region.go
+++ b/server/schedulers/shuffle_hot_region.go
@@ -107,10 +107,13 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, sto
 	}
 	srcStoreID := srcRegion.GetLeader().GetStoreId()
 	srcStore := cluster.GetStore(srcStoreID)
+	if srcStore == nil {
+		log.Error("failed to get the source store", zap.Uint64("store-id", srcStoreID))
+	}
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{MoveRegion: true},
-		schedule.NewExcludedFilter(srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
-		schedule.NewDistinctScoreFilter(cluster.GetLocationLabels(), cluster.GetRegionStores(srcRegion), srcStore),
+		schedule.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
+		schedule.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
+		schedule.NewDistinctScoreFilter(s.GetName(), cluster.GetLocationLabels(), cluster.GetRegionStores(srcRegion), srcStore),
 	}
 	stores := cluster.GetStores()
 	destStoreIDs := make([]uint64, 0, len(stores))
diff --git a/server/schedulers/shuffle_leader.go b/server/schedulers/shuffle_leader.go
index 464d959e7e5..f9fc5019a58 100644
--- a/server/schedulers/shuffle_leader.go
+++ b/server/schedulers/shuffle_leader.go
@@ -24,7 +24,10 @@ func init() {
 	})
 }
 
+const shuffleLeaderName = "shuffle-leader-scheduler"
+
 type shuffleLeaderScheduler struct {
+	name string
 	*baseScheduler
 	selector *schedule.RandomSelector
 }
@@ -33,17 +36,18 @@ type shuffleLeaderScheduler struct {
 // between stores.
 func newShuffleLeaderScheduler(opController *schedule.OperatorController) schedule.Scheduler {
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{TransferLeader: true},
+		schedule.StoreStateFilter{ActionScope: shuffleLeaderName, TransferLeader: true},
 	}
 	base := newBaseScheduler(opController)
 	return &shuffleLeaderScheduler{
+		name:          shuffleLeaderName,
 		baseScheduler: base,
 		selector:      schedule.NewRandomSelector(filters),
 	}
 }
 
 func (s *shuffleLeaderScheduler) GetName() string {
-	return "shuffle-leader-scheduler"
+	return s.name
 }
 
 func (s *shuffleLeaderScheduler) GetType() string {
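
The shuffle-style schedulers above all pair a fixed filter list with schedule.NewRandomSelector. Assuming the selector picks uniformly among the stores that pass every filter — which is what the name suggests, though the pd implementation is not shown in this diff — the core logic looks roughly like this sketch with illustrative types:

package main

import (
	"fmt"
	"math/rand"
)

type Store struct{ ID uint64 }

// Filter reports whether a store must be skipped.
type Filter func(s *Store) bool

// randomSelect returns a uniformly random store that no filter rejects,
// or nil when every candidate is filtered out.
func randomSelect(stores []*Store, filters []Filter) *Store {
	candidates := make([]*Store, 0, len(stores))
outer:
	for _, s := range stores {
		for _, f := range filters {
			if f(s) {
				continue outer
			}
		}
		candidates = append(candidates, s)
	}
	if len(candidates) == 0 {
		return nil
	}
	return candidates[rand.Intn(len(candidates))]
}

func main() {
	stores := []*Store{{1}, {2}, {3}}
	notStore2 := func(s *Store) bool { return s.ID == 2 }
	fmt.Println(randomSelect(stores, []Filter{notStore2}).ID) // 1 or 3
}
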
diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go
index 1a3e994a6e0..80713735b9f 100644
--- a/server/schedulers/shuffle_region.go
+++ b/server/schedulers/shuffle_region.go
@@ -27,7 +27,10 @@ func init() {
 	})
 }
 
+const shuffleRegionName = "shuffle-region-scheduler"
+
 type shuffleRegionScheduler struct {
+	name string
 	*baseScheduler
 	selector *schedule.RandomSelector
 }
@@ -36,17 +39,18 @@ type shuffleRegionScheduler struct {
 // between stores.
 func newShuffleRegionScheduler(opController *schedule.OperatorController) schedule.Scheduler {
 	filters := []schedule.Filter{
-		schedule.StoreStateFilter{MoveRegion: true},
+		schedule.StoreStateFilter{ActionScope: shuffleRegionName, MoveRegion: true},
 	}
 	base := newBaseScheduler(opController)
 	return &shuffleRegionScheduler{
+		name:          shuffleRegionName,
 		baseScheduler: base,
 		selector:      schedule.NewRandomSelector(filters),
 	}
 }
 
 func (s *shuffleRegionScheduler) GetName() string {
-	return "shuffle-region-scheduler"
+	return s.name
 }
 
 func (s *shuffleRegionScheduler) GetType() string {
@@ -65,7 +69,7 @@ func (s *shuffleRegionScheduler) Schedule(cluster schedule.Cluster) []*schedule.
 		return nil
 	}
 
-	excludedFilter := schedule.NewExcludedFilter(nil, region.GetStoreIds())
+	excludedFilter := schedule.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds())
 	newPeer := s.scheduleAddPeer(cluster, excludedFilter)
 	if newPeer == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no_new_peer").Inc()
diff --git a/server/server.go b/server/server.go
index b487129a10e..bd1569c19f8 100644
--- a/server/server.go
+++ b/server/server.go
@@ -157,7 +157,7 @@ func (s *Server) startEtcd(ctx context.Context) error {
 	if err != nil {
 		return errors.WithStack(err)
 	}
-	tlsConfig, err := s.cfg.Security.ToTLSConfig()
+	tlsConfig, err := ToTLSConfig(s.cfg.Security.ConvertToMap())
 	if err != nil {
 		return err
 	}
@@ -727,9 +727,9 @@ func (s *Server) GetClusterVersion() semver.Version {
 	return s.scheduleOpt.loadClusterVersion()
 }
 
-// GetSecurityConfig get the security config.
-func (s *Server) GetSecurityConfig() *SecurityConfig {
-	return &s.cfg.Security
+// GetSecurityConfig gets the paths of the security config.
+func (s *Server) GetSecurityConfig() map[string]string {
+	return s.cfg.Security.ConvertToMap()
 }
 
 // IsNamespaceExist returns whether the namespace exists.
diff --git a/server/statistics/region_collection.go b/server/statistics/region_collection.go
index 6afe1299c5b..2d3ffba3e90 100644
--- a/server/statistics/region_collection.go
+++ b/server/statistics/region_collection.go
@@ -30,6 +30,7 @@ const (
 	OfflinePeer
 	IncorrectNamespace
 	LearnerPeer
+	EmptyRegion
 )
 
 const nonIsolation = "none"
@@ -57,6 +58,7 @@ func NewRegionStatistics(opt ScheduleOptions, classifier namespace.Classifier) *
 	r.stats[OfflinePeer] = make(map[uint64]*core.RegionInfo)
 	r.stats[IncorrectNamespace] = make(map[uint64]*core.RegionInfo)
 	r.stats[LearnerPeer] = make(map[uint64]*core.RegionInfo)
+	r.stats[EmptyRegion] = make(map[uint64]*core.RegionInfo)
 	return r
 }
 
@@ -109,6 +111,11 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store
 		peerTypeIndex |= LearnerPeer
 	}
 
+	if region.GetApproximateSize() <= core.EmptyRegionApproximateSize {
+		r.stats[EmptyRegion][regionID] = region
+		peerTypeIndex |= EmptyRegion
+	}
+
 	for _, store := range stores {
 		if store.IsOffline() {
 			peer := region.GetStorePeer(store.GetID())
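
EmptyRegion slots into the existing status bitmask: Observe ORs each matching condition into peerTypeIndex, so a single region can carry several statuses at once. The |= usage implies the constants are distinct bits; assuming a 1 << iota declaration (the actual pd declaration is outside this diff), the pattern works like this toy version:

package main

import "fmt"

type RegionStatusType uint32

// Assumed bit-flag layout; only the names come from the diff above.
const (
	MissPeer RegionStatusType = 1 << iota
	ExtraPeer
	DownPeer
	PendingPeer
	OfflinePeer
	IncorrectNamespace
	LearnerPeer
	EmptyRegion
)

func main() {
	var status RegionStatusType
	status |= LearnerPeer
	status |= EmptyRegion

	// A region can be both a learner-peer region and an empty region.
	fmt.Println(status&LearnerPeer != 0) // true
	fmt.Println(status&EmptyRegion != 0) // true
	fmt.Println(status&DownPeer != 0)    // false
}
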
@@ -142,13 +149,14 @@ func (r *RegionStatistics) ClearDefunctRegion(regionID uint64) {
 
 // Collect collects the metrics of the regions' status.
 func (r *RegionStatistics) Collect() {
-	regionStatusGauge.WithLabelValues("miss_peer_region_count").Set(float64(len(r.stats[MissPeer])))
-	regionStatusGauge.WithLabelValues("extra_peer_region_count").Set(float64(len(r.stats[ExtraPeer])))
-	regionStatusGauge.WithLabelValues("down_peer_region_count").Set(float64(len(r.stats[DownPeer])))
-	regionStatusGauge.WithLabelValues("pending_peer_region_count").Set(float64(len(r.stats[PendingPeer])))
-	regionStatusGauge.WithLabelValues("offline_peer_region_count").Set(float64(len(r.stats[OfflinePeer])))
-	regionStatusGauge.WithLabelValues("incorrect_namespace_region_count").Set(float64(len(r.stats[IncorrectNamespace])))
-	regionStatusGauge.WithLabelValues("learner_peer_region_count").Set(float64(len(r.stats[LearnerPeer])))
+	regionStatusGauge.WithLabelValues("miss-peer-region-count").Set(float64(len(r.stats[MissPeer])))
+	regionStatusGauge.WithLabelValues("extra-peer-region-count").Set(float64(len(r.stats[ExtraPeer])))
+	regionStatusGauge.WithLabelValues("down-peer-region-count").Set(float64(len(r.stats[DownPeer])))
+	regionStatusGauge.WithLabelValues("pending-peer-region-count").Set(float64(len(r.stats[PendingPeer])))
+	regionStatusGauge.WithLabelValues("offline-peer-region-count").Set(float64(len(r.stats[OfflinePeer])))
+	regionStatusGauge.WithLabelValues("incorrect-namespace-region-count").Set(float64(len(r.stats[IncorrectNamespace])))
+	regionStatusGauge.WithLabelValues("learner-peer-region-count").Set(float64(len(r.stats[LearnerPeer])))
+	regionStatusGauge.WithLabelValues("empty-region-count").Set(float64(len(r.stats[EmptyRegion])))
 }
 
 // Reset resets the metrics of the regions' status.
diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go
index b60fcf55e69..1e322386b3c 100644
--- a/server/statistics/region_collection_test.go
+++ b/server/statistics/region_collection_test.go
@@ -62,10 +62,12 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) {
 	regionStats.Observe(region1, stores)
 	c.Assert(len(regionStats.stats[ExtraPeer]), Equals, 1)
 	c.Assert(len(regionStats.stats[LearnerPeer]), Equals, 1)
+	c.Assert(len(regionStats.stats[EmptyRegion]), Equals, 1)
 
 	region1 = region1.Clone(
 		core.WithDownPeers(downPeers),
 		core.WithPendingPeers(peers[0:1]),
+		core.SetApproximateSize(144),
 	)
 	regionStats.Observe(region1, stores)
 	c.Assert(len(regionStats.stats[ExtraPeer]), Equals, 1)
@@ -74,6 +76,7 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) {
 	c.Assert(len(regionStats.stats[PendingPeer]), Equals, 1)
 	c.Assert(len(regionStats.stats[LearnerPeer]), Equals, 1)
 	c.Assert(len(regionStats.stats[IncorrectNamespace]), Equals, 1)
+	c.Assert(len(regionStats.stats[EmptyRegion]), Equals, 0)
 
 	region2 = region2.Clone(core.WithDownPeers(downPeers[0:1]))
 	regionStats.Observe(region2, stores[0:2])
diff --git a/server/util.go b/server/util.go
index f3119a4a1cd..9ce968b21ce 100644
--- a/server/util.go
+++ b/server/util.go
@@ -281,7 +281,7 @@ func subTimeByWallClock(after time.Time, before time.Time) time.Duration {
 
 // InitHTTPClient initials a http client.
 func InitHTTPClient(svr *Server) error {
-	tlsConfig, err := svr.GetSecurityConfig().ToTLSConfig()
+	tlsConfig, err := ToTLSConfig(svr.GetSecurityConfig())
 	if err != nil {
 		return err
 	}
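
The test above exercises core.RegionInfo's Clone with functional options (core.WithDownPeers, core.SetApproximateSize): each option is a function applied to a fresh copy, which is how one call flips region1 from empty (at or below core.EmptyRegionApproximateSize) to size 144 and out of the EmptyRegion bucket. A generic sketch of the pattern — the RegionInfo fields below are illustrative, not core's:

package main

import "fmt"

// RegionInfo is a cut-down stand-in for core.RegionInfo.
type RegionInfo struct {
	approximateSize int64
	pendingPeers    []uint64
}

// RegionCreateOption mirrors the shape of the Clone options above.
type RegionCreateOption func(*RegionInfo)

// SetApproximateSize is modelled on core.SetApproximateSize.
func SetApproximateSize(size int64) RegionCreateOption {
	return func(r *RegionInfo) { r.approximateSize = size }
}

func WithPendingPeers(peers []uint64) RegionCreateOption {
	return func(r *RegionInfo) { r.pendingPeers = peers }
}

// Clone copies the struct (a shallow copy is fine for this sketch) and
// applies each option to the copy, leaving the original untouched.
func (r *RegionInfo) Clone(opts ...RegionCreateOption) *RegionInfo {
	c := *r
	for _, opt := range opts {
		opt(&c)
	}
	return &c
}

func main() {
	r := &RegionInfo{approximateSize: 1} // "empty" by the test's standard
	r2 := r.Clone(SetApproximateSize(144), WithPendingPeers([]uint64{1}))
	fmt.Println(r.approximateSize, r2.approximateSize) // 1 144
}
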