diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 000000000..674895cd1 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,10 @@ +codecov: + require_ci_to_pass: yes + +coverage: + status: + project: + default: + # Allow the coverage to drop by 3% + threshold: 3% + patch: off diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.gitignore b/.gitignore index e104ab6e8..e61a56bde 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ backupmeta *.ngo *.coverprofile coverage.txt +docker/data/ +docker/logs/ diff --git a/LICENSE.md b/LICENSE.md index 675c2ec95..4eedc0116 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +198,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/Makefile b/Makefile index 839a27b9e..779bfdb10 100644 --- a/Makefile +++ b/Makefile @@ -12,25 +12,28 @@ LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitHash=$(shell git rev-parse HEAD)" LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitBranch=$(shell git rev-parse --abbrev-ref HEAD)" -all: check test build +ifeq ("$(WITH_RACE)", "1") + RACEFLAG = -race +endif -release: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -o bin/br +all: check test build build: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -race -o bin/br + GO111MODULE=on go build -ldflags '$(LDFLAGS)' ${RACEFLAG} -o bin/br build_for_integration_test: GO111MODULE=on go test -c -cover -covermode=count \ -coverpkg=$(BR_PKG)/... \ -o bin/br.test # build key locker - GO111MODULE=on go build -race -o bin/locker tests/br_key_locked/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/locker tests/br_key_locked/*.go # build gc - GO111MODULE=on go build -race -o bin/gc tests/br_z_gc_safepoint/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/gc tests/br_z_gc_safepoint/*.go + # build rawkv client + GO111MODULE=on go build ${RACEFLAG} -o bin/rawkv tests/br_rawkv/*.go test: - GO111MODULE=on go test -race -tags leak ./... + GO111MODULE=on go test ${RACEFLAG} -tags leak ./... testcover: GO111MODULE=on retool do overalls \ @@ -46,6 +49,7 @@ integration_test: build build_for_integration_test @which bin/pd-server @which bin/pd-ctl @which bin/go-ycsb + @which bin/minio @which bin/br tests/run.sh @@ -71,6 +75,12 @@ static: --disable interfacer \ --disable goimports \ --disable gofmt \ + --disable wsl \ + --disable funlen \ + --disable whitespace \ + --disable gocognit \ + --disable godox \ + --disable gomnd \ $$($(PACKAGE_DIRECTORIES)) lint: diff --git a/README.md b/README.md index 55444fdec..408b09749 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,55 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. 
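The Makefile hunk above makes the race detector opt-in: `WITH_RACE=1` sets `RACEFLAG = -race`, and the `build` and `test` targets expand `${RACEFLAG}`. A minimal sketch of the intended invocations (assuming GNU make; the variable name comes straight from the hunk):

```sh
# Default build: no -race, closer to a release binary
make build

# Opt back in to the race detector for local debugging
make build WITH_RACE=1

# The test target honors the same switch
make test WITH_RACE=1
```

Keeping `-race` off by default avoids the detector's runtime and memory overhead in ordinary builds while leaving it one variable away.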
+## Quick start + +```sh +# Start TiDB cluster +docker-compose -f docker-compose.yaml rm -s -v && \ +docker-compose -f docker-compose.yaml build && \ +docker-compose -f docker-compose.yaml up --remove-orphans + +# Attach to the control container to run BR +docker exec -it br_control_1 bash + +# Load test data into TiDB +go-ycsb load mysql -p workload=core \ + -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ + -p recordcount=100000 -p threadcount=100 + +# How many rows do we get? 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Build BR and backup! +make build && \ +bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_backup.log" + +# Let's drop the database. +mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" + +# Restore! +bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_restore.log" + +# How many rows do we get again? Expected to be 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Test S3-compatible storage (MinIO). +# Create a bucket to store the backup with mc (the MinIO client). +mc config host add minio $S3_ENDPOINT $MINIO_ACCESS_KEY $MINIO_SECRET_KEY && \ +mc mb minio/mybucket + +# Back up to S3-compatible storage. +bin/br backup full --pd pd0:2379 --storage "s3://mybucket/full" \ + --s3.endpoint="$S3_ENDPOINT" + +# Drop the database and restore! +mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" && \ +bin/br restore full --pd pd0:2379 --storage "s3://mybucket/full" \ + --s3.endpoint="$S3_ENDPOINT" +``` + ## Contributing Contributions are welcomed and greatly appreciated. See [CONTRIBUTING](./CONTRIBUTING.md) diff --git a/cmd/backup.go b/cmd/backup.go index 39aa4fd28..d37229e0a 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( @@ -5,6 +7,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -13,16 +16,27 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } - return task.RunBackup(GetDefaultContext(), cmdName, &cfg) + return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} + +func runBackupRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false + return err + } + return task.RunBackupRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) +} // NewBackupCommand return a full backup subcommand.
func NewBackupCommand() *cobra.Command { command := &cobra.Command{ - Use: "backup", - Short: "backup a TiDB cluster", + Use: "backup", + Short: "backup a TiDB/TiKV cluster", + SilenceUsage: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err } @@ -43,6 +57,7 @@ func NewBackupCommand() *cobra.Command { newFullBackupCommand(), newDbBackupCommand(), newTableBackupCommand(), + newRawBackupCommand(), ) task.DefineBackupFlags(command.PersistentFlags()) @@ -87,3 +102,18 @@ func newTableBackupCommand() *cobra.Command { task.DefineTableFlags(command) return command } + +// newRawBackupCommand returns a raw kv range backup subcommand. +func newRawBackupCommand() *cobra.Command { + // TODO: remove experimental tag if it's stable + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) backup a raw kv range from TiKV cluster", + RunE: func(command *cobra.Command, _ []string) error { + return runBackupRawCommand(command, "Raw backup") + }, + } + + task.DefineRawBackupFlags(command) + return command +} diff --git a/cmd/cmd.go b/cmd/cmd.go index fdadaa6f8..87a8aadc9 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,11 +1,16 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( "context" "net/http" "net/http/pprof" + "os" + "path/filepath" "sync" "sync/atomic" + "time" "github.com/pingcap/log" "github.com/pingcap/tidb/util/logutil" @@ -13,14 +18,18 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) var ( - initOnce = sync.Once{} - defaultContext context.Context - hasLogFile uint64 + initOnce = sync.Once{} + defaultContext context.Context + hasLogFile uint64 + tidbGlue = gluetidb.Glue{} + envLogToTermKey = "BR_LOG_TO_TERM" ) const ( @@ -37,6 +46,10 @@ const ( flagVersionShort = "V" ) +func timestampLogFileName() string { + return filepath.Join(os.TempDir(), "br.log."+time.Now().Format(time.RFC3339)) +} + // AddFlags adds flags to the given cmd. func AddFlags(cmd *cobra.Command) { cmd.Version = utils.BRInfo() @@ -45,8 +58,8 @@ func AddFlags(cmd *cobra.Command) { cmd.PersistentFlags().StringP(FlagLogLevel, "L", "info", "Set the log level") - cmd.PersistentFlags().String(FlagLogFile, "", - "Set the log file path. If not set, logs will output to stdout") + cmd.PersistentFlags().String(FlagLogFile, timestampLogFileName(), + "Set the log file path. If not set, logs will be written to a temp file") cmd.PersistentFlags().String(FlagStatusAddr, "", "Set the HTTP listening address for the status report service. Set to empty string to disable") task.DefineCommonFlags(cmd.PersistentFlags()) @@ -69,8 +82,15 @@ func Init(cmd *cobra.Command) (err error) { if err != nil { return } + _, outputLogToTerm := os.LookupEnv(envLogToTermKey) + if outputLogToTerm { + // Log to term if env `BR_LOG_TO_TERM` is set. + conf.File.Filename = "" + } if len(conf.File.Filename) != 0 { atomic.StoreUint64(&hasLogFile, 1) + summary.InitCollector(true) + cmd.Printf("Detail BR log in %s\n", conf.File.Filename) } lg, p, e := log.InitLogger(conf) if e != nil { @@ -84,16 +104,20 @@ func Init(cmd *cobra.Command) (err error) { err = e return } + tidbLogCfg := logutil.LogConfig{} if len(slowLogFilename) != 0 { - slowCfg := logutil.LogConfig{SlowQueryFile: slowLogFilename} - e = logutil.InitLogger(&slowCfg) - if e != nil { - err = e - return - } + tidbLogCfg.SlowQueryFile = slowLogFilename } else { // Hack!
Discard slow log by setting log level to PanicLevel logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel) + // Disable annoying TiDB logs. + // TODO: some error logs output randomly; we need to fix them in TiDB. + tidbLogCfg.Level = "fatal" + } + e = logutil.InitLogger(&tidbLogCfg) + if e != nil { + err = e + return } // Initialize the pprof server. diff --git a/cmd/restore.go b/cmd/restore.go index 2dfec9846..1e894b4ee 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,9 +1,12 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -12,16 +15,39 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false + return err + } + return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} + +func runRestoreRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreRawConfig{ + RawKvConfig: task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}, + } + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } - return task.RunRestore(GetDefaultContext(), cmdName, &cfg) + return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) +} + +func runRestoreTiflashReplicaCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false + return err + } + + return task.RunRestoreTiflashReplica(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ - Use: "restore", - Short: "restore a TiKV cluster from a backup", + Use: "restore", + Short: "restore a TiDB/TiKV cluster", + SilenceUsage: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err @@ -40,6 +66,8 @@ func NewRestoreCommand() *cobra.Command { newFullRestoreCommand(), newDbRestoreCommand(), newTableRestoreCommand(), + newRawRestoreCommand(), + newTiflashReplicaRestoreCommand(), ) task.DefineRestoreFlags(command.PersistentFlags()) @@ -80,3 +108,27 @@ func newTableRestoreCommand() *cobra.Command { task.DefineTableFlags(command) return command } + +func newTiflashReplicaRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "tiflash-replica", + Short: "restore the tiflash replica before the last restore; it must only be used after the last restore failed", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreTiflashReplicaCommand(cmd, "Restore TiFlash Replica") + }, + } + return command +} + +func newRawRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) restore a raw kv range to TiKV cluster", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreRawCommand(cmd, "Raw restore") + }, + } + + task.DefineRawRestoreFlags(command) + return command +} diff --git a/cmd/validate.go b/cmd/validate.go index 559cb9983..1ee0c6b17 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -1,3 +1,5 @@ +// Copyright 2020
PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( @@ -14,11 +16,12 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/pd/pkg/mock/mockid" + "github.com/pingcap/pd/v3/pkg/mock/mockid" "github.com/spf13/cobra" "go.uber.org/zap" "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -26,8 +29,9 @@ import ( // NewValidateCommand return a debug subcommand. func NewValidateCommand() *cobra.Command { meta := &cobra.Command{ - Use: "validate <subcommand>", - Short: "commands to check/debug backup data", + Use: "validate <subcommand>", + Short: "commands to check/debug backup data", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err } @@ -59,7 +63,7 @@ func newCheckSumCommand() *cobra.Command { return err } - _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { return err } @@ -147,7 +151,7 @@ func newBackupMetaCommand() *cobra.Command { if err = cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - _, _, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, _, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { log.Error("read backupmeta failed", zap.Error(err)) return err } @@ -166,15 +170,15 @@ func newBackupMetaCommand() *cobra.Command { tables = append(tables, db.Tables...) } // Check if the ranges of files overlapped - rangeTree := restore.NewRangeTree() + rangeTree := rtree.NewRangeTree() for _, file := range files { - if out := rangeTree.InsertRange(restore.Range{ + if out := rangeTree.InsertRange(rtree.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }); out != nil { log.Error( "file ranges overlapped", - zap.Stringer("out", out.(*restore.Range)), + zap.Stringer("out", out), zap.Stringer("file", file), ) } } @@ -196,19 +200,19 @@ func newBackupMetaCommand() *cobra.Command { newTable := new(model.TableInfo) tableID, _ := tableIDAllocator.Alloc() newTable.ID = int64(tableID) - newTable.Name = table.Schema.Name - newTable.Indices = make([]*model.IndexInfo, len(table.Schema.Indices)) - for i, indexInfo := range table.Schema.Indices { + newTable.Name = table.Info.Name + newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices)) + for i, indexInfo := range table.Info.Indices { indexID, _ := indexIDAllocator.Alloc() newTable.Indices[i] = &model.IndexInfo{ ID: int64(indexID), Name: indexInfo.Name, } } - rules := restore.GetRewriteRules(newTable, table.Schema, 0) + rules := restore.GetRewriteRules(newTable, table.Info, 0) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...)
- tableIDMap[table.Schema.ID] = int64(tableID) + tableIDMap[table.Info.ID] = int64(tableID) } // Validate rewrite rules for _, file := range files { @@ -238,7 +242,7 @@ func decodeBackupMetaCommand() *cobra.Command { if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { return err } diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 000000000..ab6360d6d --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,216 @@ +--- +# Source: tidb-docker-compose/templates/docker-compose.yml +version: '3.2' + +services: + control: + image: control:latest + build: + context: . + dockerfile: ./docker/Dockerfile + volumes: + - ./docker/data:/data + - ./docker/logs:/tmp + command: -c "/usr/bin/tail -f /dev/null" + depends_on: + - "tidb" + restart: on-failure + env_file: + - ./docker/minio.env + + pd0: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./docker/config/pd.toml:/pd.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv0: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + env_file: + - ./docker/minio.env + + tikv1: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv1:20160 + - --data-dir=/data/tikv1 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv1.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + env_file: + - ./docker/minio.env + + tikv2: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv2:20160 + - --data-dir=/data/tikv2 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv2.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + env_file: + - ./docker/minio.env + + tikv3: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv3:20160 + - --data-dir=/data/tikv3 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv3.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + env_file: + - ./docker/minio.env + + 
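The compose file declares five near-identical TiKV services (tikv0 through tikv4; the last one follows below). A quick smoke test from inside the control container, sketched assuming the service names and ports defined in this file and PD's standard HTTP API:

```sh
# Run inside the control container: docker exec -it br_control_1 bash

# PD should list all five TiKV stores once they register
curl -s http://pd0:2379/pd/api/v1/stores

# TiDB should answer on its advertised address
mysql -uroot -htidb -P4000 -e "SELECT VERSION();"
```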
tikv4: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv4:20160 + - --data-dir=/data/tikv4 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv4.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + env_file: + - ./docker/minio.env + + tidb: + image: pingcap/tidb:latest + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./docker/config/tidb.toml:/tidb.toml:ro + - ./docker/logs:/logs + command: + - --store=tikv + - --path=pd0:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + - --advertise-address=tidb + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + - "tikv3" + - "tikv4" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + minio: + image: minio/minio + ports: + - 24927:24927 + volumes: + - ./docker/data/s3:/data/s3 + command: server --address=:24927 /data/s3 + env_file: + - ./docker/minio.env + + tidb-vision: + image: pingcap/tidb-vision:latest + environment: + PD_ENDPOINT: pd0:2379 + ports: + - "8010:8010" + restart: on-failure diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..14c577fcf --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,27 @@ +# For loading data into TiDB +FROM golang:1.13.8-buster as go-ycsb-builder +WORKDIR /go/src/github.com/pingcap/ +RUN git clone https://github.com/pingcap/go-ycsb.git && \ + cd go-ycsb && \ + make + +# For operating MinIO S3-compatible storage +FROM minio/mc as mc-builder + +FROM golang:1.13.8-buster + +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + curl \ + vim \ + less \ + default-mysql-client \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /go/src/github.com/pingcap/br +COPY . . + +COPY --from=go-ycsb-builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb +COPY --from=mc-builder /usr/bin/mc /usr/bin/mc + +ENTRYPOINT ["/bin/bash"] diff --git a/docker/config/pd.toml b/docker/config/pd.toml new file mode 100644 index 000000000..e6fb173d1 --- /dev/null +++ b/docker/config/pd.toml @@ -0,0 +1,18 @@ +# PD Configuration. +[schedule] +# Disable Region Merge +max-merge-region-size = 0 +max-merge-region-key = 0 +merge-schedule-limit = 0 + +max-snapshot-count = 10 +max-pending-peer-count = 32 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +[replication] +# The number of replicas for each region. +max-replicas = 3 diff --git a/docker/config/tidb.toml b/docker/config/tidb.toml new file mode 100644 index 000000000..3ef20cc07 --- /dev/null +++ b/docker/config/tidb.toml @@ -0,0 +1,9 @@ +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration. It is very dangerous to change, so do so only if you know what you are doing. +lease = "360s" + +# When creating a table, split a separate region for it. It is recommended to +# turn off this option if a large number of tables will be created. +split-table = true diff --git a/docker/config/tikv.toml b/docker/config/tikv.toml new file mode 100644 index 000000000..6528e447f --- /dev/null +++ b/docker/config/tikv.toml @@ -0,0 +1,22 @@ +[raftstore] +# true (default value) for high reliability; this can prevent data loss in case of power failure.
+sync-log = true + +[coprocessor] +# Make region split more aggressive. +region-max-keys = 100 +region-split-keys = 80 + +[rocksdb] +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db. +max-open-files = 1024 + +[raftdb] +max-open-files = 1024 diff --git a/docker/minio.env b/docker/minio.env new file mode 100644 index 000000000..d865b2474 --- /dev/null +++ b/docker/minio.env @@ -0,0 +1,6 @@ +MINIO_ACCESS_KEY=brs3accesskey +MINIO_SECRET_KEY=brs3secretkey +MINIO_BROWSER=off +AWS_ACCESS_KEY_ID=brs3accesskey +AWS_SECRET_ACCESS_KEY=brs3secretkey +S3_ENDPOINT=http://minio:24927 diff --git a/go.mod b/go.mod index 1761ada78..6e330c216 100644 --- a/go.mod +++ b/go.mod @@ -14,31 +14,31 @@ require ( github.com/gogo/protobuf v1.3.1 github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 + github.com/klauspost/cpuid v1.2.0 // indirect github.com/mattn/go-runewidth v0.0.7 // indirect - github.com/onsi/ginkgo v1.10.3 // indirect - github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 + github.com/montanaflynn/stats v0.5.0 // indirect + github.com/onsi/ginkgo v1.11.0 // indirect + github.com/onsi/gomega v1.8.1 // indirect + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 // indirect - github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f - github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b - github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6 - github.com/pingcap/tidb v1.1.0-beta.0.20200218111531-28c9efc12b19 - github.com/pingcap/tidb-tools v4.0.0-beta+incompatible - github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24 + github.com/pingcap/kvproto v0.0.0-20200331072206-c211b473fe43 + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/parser v3.1.0-beta.2.0.20200318061433-f0b8f6cdca0d+incompatible + github.com/pingcap/pd/v3 v3.1.0-beta.2.0.20200312100832-1206736bd050 + github.com/pingcap/tidb v1.1.0-beta.0.20200401121410-5854181fbbe0 + github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200317092225-ed6b2a87af54+incompatible + github.com/pingcap/tipb v0.0.0-20200401093201-cc8b75c53383 github.com/prometheus/client_golang v1.0.0 - github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect + github.com/prometheus/common v0.4.1 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 + github.com/spf13/pflag v1.0.5 + github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect + go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect - go.uber.org/atomic v1.5.1 // indirect - go.uber.org/zap v1.13.0 - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect + go.uber.org/zap v1.14.1 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - 
golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect ) diff --git a/go.sum b/go.sum index 485a32d07..9c95c0044 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,11 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUW github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -67,6 +70,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= @@ -75,6 +79,7 @@ github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5je github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= @@ -87,9 +92,11 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= 
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= @@ -99,9 +106,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRtv6qSFaLQ= github.com/fsouza/fake-gcs-server v1.15.0/go.mod h1:HNxAJ/+FY/XSsxuwz8iIYdp2GtMmPbJ8WQjjGMxd6Qk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc= github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -125,17 +130,14 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.6/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod 
h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -143,6 +145,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -151,6 +154,7 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -195,6 +199,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= @@ -207,6 +212,9 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= @@ -228,6 +236,8 @@ github.com/kisielk/gotool v1.0.0 
h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -241,11 +251,11 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -264,6 +274,7 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -279,6 +290,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk= +github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= @@ -288,12 +301,14 @@ github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= @@ -303,13 +318,12 @@ github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7f github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= -github.com/pingcap-incubator/tidb-dashboard v0.0.0-20200110133619-1c1c65dd8750/go.mod h1:Yx2Ix+adNvCO8F3tHgZmgt9sJhOjJy/B4CW/6filV4w= +github.com/pingcap-incubator/tidb-dashboard v0.0.0-20200218115603-7ab5f06db73d/go.mod h1:MZ8NU1iKeJD2Uyh6zilN7dqakrFq0RjQKpN8hJ42hMw= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= -github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= -github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= -github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod 
h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -317,29 +331,34 @@ github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0h github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= -github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20200213074014-83e827908584 h1:DhQfXNn9m36b2/4zUfPHDDR6CwS2VONbfPC4s+LMVj0= github.com/pingcap/kvproto v0.0.0-20200213074014-83e827908584/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f h1:wi4TNMBfsgiMsOlTSHBq4JKFViabIA1W0d+owiLtp70= -github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200317043902-2838e21ca222 h1:y+qDC9hP5ZMQADkVtbGvZOP68NsoYFlt4I3r8QhIvVk= +github.com/pingcap/kvproto v0.0.0-20200317043902-2838e21ca222/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200331072206-c211b473fe43 h1:JFKYB7Y6koAVT0l/f1SDgb0DfFlRZh8ku39fF2NC9aM= +github.com/pingcap/kvproto v0.0.0-20200331072206-c211b473fe43/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b h1:oKql7mOA71N7NxMn3MHtYcxntXrFxNPDMDalF/dW3iM= -github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= -github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6 h1:6Ut7/Gg6nO2tkrufRncFsI4WnYsmrLI0DN8xcGGOFL8= -github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6/go.mod h1:zezAKmc5aqNUREQdxxeP4WuAx22FlPQL/p7xFYKoThU= -github.com/pingcap/tidb v1.1.0-beta.0.20200218111531-28c9efc12b19 h1:0BR+dr+e+LK7dCGpMMyY7pK5KccTl1JxLvS7flQZbOo= -github.com/pingcap/tidb v1.1.0-beta.0.20200218111531-28c9efc12b19/go.mod h1:NtZod8uyqDhHvo5Y85y2SI6rjPcfsDdTkq/Rs4Hkrn0= -github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= -github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24 
h1:9cdSUluc+Q4yGzGg8AeG46/e8Rw7pJ5jJz9Y4QRNvKE= -github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v3.1.0-beta.1.0.20200318061433-f0b8f6cdca0d+incompatible h1:+Jibmc9uklKz9/prpBggFyjZpqRM8phc1AOOJGxkP48= +github.com/pingcap/parser v3.1.0-beta.1.0.20200318061433-f0b8f6cdca0d+incompatible/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v3.1.0-beta.2.0.20200318061433-f0b8f6cdca0d+incompatible h1:cR7rWBrN8y7Uqk6UBUJ1Ai/EW2DA9yw8t0QmXl6Tr2A= +github.com/pingcap/parser v3.1.0-beta.2.0.20200318061433-f0b8f6cdca0d+incompatible/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/pd/v3 v3.1.0-beta.2.0.20200312100832-1206736bd050 h1:mxPdR0pxnUcRfRGX2JnaLyAd9SZWeR42SzvMp4Zv3YI= +github.com/pingcap/pd/v3 v3.1.0-beta.2.0.20200312100832-1206736bd050/go.mod h1:0HfF1LfWLMuGpui0PKhGvkXxfjv1JslMRY6B+cae3dg= +github.com/pingcap/tidb v1.1.0-beta.0.20200401121410-5854181fbbe0 h1:bcl8cbL0K9oZ+vYWNJIgUH2rMkesjfsbJkpZpD2I+lg= +github.com/pingcap/tidb v1.1.0-beta.0.20200401121410-5854181fbbe0/go.mod h1:2cL/Jdq//AUbt/m/VmOwc7wm82oLFn7o/B6fiQtOpQE= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200317092225-ed6b2a87af54+incompatible h1:tYADqdmWwgDOwf/qEN0trJAy6H3c3Tt/QZx1z4qVrRQ= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200317092225-ed6b2a87af54+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20200401093201-cc8b75c53383 h1:y1ayhtouCaO0u74JNMN8s20CGJT0yIuAb8UXOYnCALc= +github.com/pingcap/tipb v0.0.0-20200401093201-cc8b75c53383/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -355,22 +374,26 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28= github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.10+incompatible h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto= github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -389,6 +412,8 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -399,20 +424,23 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= -github.com/swaggo/cli v1.20.0/go.mod h1:7jzoQluD0EWMc0rxx6kkPoRNfYNHkNJI/NokjEwJiwM= github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= github.com/swaggo/http-swagger v0.0.0-20200103000832-0e9263c4b516/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= 
github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= -github.com/swaggo/swag v1.6.4/go.mod h1:3LVbAPI0ekF7sEPuA4XcVsSeVLAxx3hAPD3+O6b1vL4= +github.com/swaggo/swag v1.6.5/go.mod h1:Y7ZLSS0d0DdxhWGVhQdu+Bu1QhaF5k0RD7FKdiAykeY= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= @@ -426,8 +454,11 @@ github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljT github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3 h1:ZsIlNwu/G0zbChIZaWOeZ2TPGNmKMt46jZLXi3e8LFc= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -436,6 +467,7 @@ github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuI github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -445,9 +477,10 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -455,21 +488,27 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -487,13 +526,15 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -514,8 +555,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -547,7 +588,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200103143344-a1369afcdac7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= @@ -575,6 +615,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -582,14 +623,14 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a h1:TwMENskLwU2NnWBzrJGEWHq golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310CI4WGPCNPyrLbE7WZA8Y= golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 h1:0sfSpGSa544Fwnbot3Oxq/U6SXqjty6Jy/3wRhVS7ig= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -611,6 +652,7 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= @@ -620,18 +662,16 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -652,8 +692,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 
h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index 103699614..4b369785f 100644 --- a/main.go +++ b/main.go @@ -7,7 +7,6 @@ import ( "os/signal" "syscall" - "github.com/pingcap/errors" "github.com/spf13/cobra" "github.com/pingcap/br/cmd" @@ -53,7 +52,6 @@ func main() { ) rootCmd.SetArgs(os.Args[1:]) if err := rootCmd.Execute(); err != nil { - rootCmd.Println(errors.ErrorStack(err)) os.Exit(1) } } diff --git a/metrics/grafana/br.json b/metrics/grafana/br.json new file mode 100644 index 000000000..d211b4914 --- /dev/null +++ b/metrics/grafana/br.json @@ -0,0 +1,1690 @@ +{ + "__inputs": [ + { + "name": "DS_TEST-CLUSTER", + "label": "test-cluster", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.1.6" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_TEST-CLUSTER}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 16, + "iteration": 1577953179687, + "links": [], + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 15, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_worker.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-worker", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_endpoint\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-endpoint", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + 
"timeRegions": [], + "timeShift": null, + "title": "Backup CPU Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 2, + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 8 + }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_backup_error_counter[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{error}} {{instance}}", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Backup Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": 
null, + "min": "0", + "show": true + }, + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 9, + "x": 7, + "y": 8 + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_backup_range_size_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "backup-flow", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "rate(tikv_backup_range_size_bytes_sum[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "metric": "", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "BackupSST Generation Throughput", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_range_duration_seconds_sum{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_backup_range_duration_seconds_count{instance=~\"$instance\"}[1m])) by (type)", + "format": "time_series", + 
"intervalFactor": 2, + "legendFormat": "{{type}} - avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Range Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 15 + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": " 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_request_duration_seconds_sum{instance=~\"$instance\"}[1m])) / sum(rate(tikv_backup_request_duration_seconds_count{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Subtask Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as 
zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + "refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Backup", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 17, + "panels": [], + "title": "Restore", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 2 + }, + "id": 21, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\"}[1m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 2 + }, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + 
"max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": " \tThe number of leaders on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}) by (instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + }, + { + "expr": "delta(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}[30s]) < -10", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Leader", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "The number of Regions on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + 
"gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"region\"}) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Region", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 33, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "B" + }, + { + "expr": "sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type)", + "format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "{{type}}-avg", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Process SST Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_import_download_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "download-flow", + "refId": "A" + }, + { + "expr": "rate(tikv_import_download_bytes_sum[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DownLoad SST Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_import_error_counter[1m])", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "{{error}}-{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Restore Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + "refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 18, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_TEST-CLUSTER}", + "definition": "label_values(tikv_engine_size_bytes, instance)", + "hide": 0, + "includeAll": true, + "label": "Instance", + "multi": false, + "name": "instance", + "options": [], + "query": "label_values(tikv_engine_size_bytes, instance)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Test-Cluster-Backup & Restore", + "uid": "AzvioWLWz", + "version": 25 +} diff --git a/pkg/backup/check.go b/pkg/backup/check.go new file mode 100644 index 000000000..38b2d927d --- /dev/null +++ b/pkg/backup/check.go @@ -0,0 +1,35 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package backup + +import ( + "encoding/hex" + + "github.com/google/btree" + "github.com/pingcap/log" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" +) + +// checkDupFiles checks if there are any files are duplicated. 
+func checkDupFiles(rangeTree *rtree.RangeTree) { + // Name -> SHA256 + files := make(map[string][]byte) + rangeTree.Ascend(func(i btree.Item) bool { + rg := i.(*rtree.Range) + for _, f := range rg.Files { + old, ok := files[f.Name] + if ok { + log.Error("dup file", + zap.String("Name", f.Name), + zap.String("SHA256_1", hex.EncodeToString(old)), + zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), + ) + } else { + files[f.Name] = f.Sha256 + } + } + return true + }) +} diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 2d7d8b2e7..abb3dd4e5 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -11,15 +13,16 @@ import ( "github.com/gogo/protobuf/proto" "github.com/google/btree" "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" + kvproto "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" @@ -28,6 +31,9 @@ import ( "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -35,7 +41,7 @@ import ( // ClientMgr manages connections needed by backup. type ClientMgr interface { - GetBackupClient(ctx context.Context, storeID uint64) (backup.BackupClient, error) + GetBackupClient(ctx context.Context, storeID uint64) (kvproto.BackupClient, error) GetPDClient() pd.Client GetTiKV() tikv.Storage GetLockResolver() *tikv.LockResolver @@ -52,9 +58,9 @@ type Client struct { mgr ClientMgr clusterID uint64 - backupMeta backup.BackupMeta + backupMeta kvproto.BackupMeta storage storage.ExternalStorage - backend *backup.StorageBackend + backend *kvproto.StorageBackend } // NewBackupClient returns a new backup client @@ -69,25 +75,33 @@ func NewBackupClient(ctx context.Context, mgr ClientMgr) (*Client, error) { } // GetTS returns the latest timestamp. 
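+// A non-zero ts is used as the backup timestamp directly; otherwise a fresh TS is fetched from PD and, for a positive timeago duration, shifted back accordingly.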
-func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, error) { - p, l, err := bc.mgr.GetPDClient().GetTS(ctx) - if err != nil { - return 0, errors.Trace(err) - } - backupTS := oracle.ComposeTS(p, l) - - switch { - case duration < 0: - return 0, errors.New("negative timeago is not allowed") - case duration > 0: - log.Info("backup time ago", zap.Duration("timeago", duration)) - - backupTime := oracle.GetTimeFromTS(backupTS) - backupAgo := backupTime.Add(-duration) - if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { - return 0, errors.New("backup ts overflow please choose a smaller timeago") +func (bc *Client) GetTS(ctx context.Context, duration time.Duration, ts uint64) (uint64, error) { + var ( + backupTS uint64 + err error + ) + if ts > 0 { + backupTS = ts + } else { + p, l, err := bc.mgr.GetPDClient().GetTS(ctx) + if err != nil { + return 0, errors.Trace(err) + } + backupTS = oracle.ComposeTS(p, l) + + switch { + case duration < 0: + return 0, errors.New("negative timeago is not allowed") + case duration > 0: + log.Info("backup time ago", zap.Duration("timeago", duration)) + + backupTime := oracle.GetTimeFromTS(backupTS) + backupAgo := backupTime.Add(-duration) + if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { + return 0, errors.New("backup ts overflow please choose a smaller timeago") + } + backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } - backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } // check backup time do not exceed GCSafePoint @@ -100,7 +114,7 @@ func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, er } // SetStorage set ExternalStorage for client -func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { +func (bc *Client) SetStorage(ctx context.Context, backend *kvproto.StorageBackend, sendCreds bool) error { var err error bc.storage, err = storage.Create(ctx, backend, sendCreds) if err != nil { @@ -119,7 +133,12 @@ func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend } // SaveBackupMeta saves the current backup meta at the given path. 
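+// The given DDL jobs are marshaled to JSON and stored in backupMeta.Ddls before the meta file is written to the external storage.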
-func (bc *Client) SaveBackupMeta(ctx context.Context) error { +func (bc *Client) SaveBackupMeta(ctx context.Context, ddlJobs []*model.Job) error { + ddlJobsData, err := json.Marshal(ddlJobs) + if err != nil { + return errors.Trace(err) + } + bc.backupMeta.Ddls = ddlJobsData backupMetaData, err := proto.Marshal(&bc.backupMeta) if err != nil { return errors.Trace(err) @@ -127,7 +146,7 @@ func (bc *Client) SaveBackupMeta(ctx context.Context) error { log.Debug("backup meta", zap.Reflect("meta", bc.backupMeta)) backendURL := storage.FormatBackendURL(bc.backend) - log.Info("save backup meta", zap.Stringer("path", &backendURL)) + log.Info("save backup meta", zap.Stringer("path", &backendURL), zap.Int("jobs", len(ddlJobs))) return bc.storage.Write(ctx, utils.MetaFile, backupMetaData) } @@ -173,13 +192,13 @@ func BuildBackupRangeAndSchema( storage kv.Storage, tableFilter *filter.Filter, backupTS uint64, -) ([]Range, *Schemas, error) { +) ([]rtree.Range, *Schemas, error) { info, err := dom.GetSnapshotInfoSchema(backupTS) if err != nil { return nil, nil, errors.Trace(err) } - ranges := make([]Range, 0) + ranges := make([]rtree.Range, 0) backupSchemas := newBackupSchemas() for _, dbInfo := range info.AllSchemas() { // skip system databases @@ -205,6 +224,16 @@ func BuildBackupRangeAndSchema( zap.Stringer("table", tableInfo.Name), zap.Int64("AutoIncID", globalAutoID)) + + // remove all non-public indices + n := 0 + for _, index := range tableInfo.Indices { + if index.State == model.StatePublic { + tableInfo.Indices[n] = index + n++ + } + } + tableInfo.Indices = tableInfo.Indices[:n] + if dbData == nil { dbData, err = json.Marshal(dbInfo) if err != nil { @@ -216,7 +245,7 @@ func BuildBackupRangeAndSchema( return nil, nil, errors.Trace(err) } - schema := backup.Schema{ + schema := kvproto.Schema{ Db: dbData, Table: tableData, } @@ -227,7 +256,7 @@ func BuildBackupRangeAndSchema( return nil, nil, err } for _, r := range tableRanges { - ranges = append(ranges, Range{ + ranges = append(ranges, rtree.Range{ StartKey: r.StartKey, EndKey: r.EndKey, }) @@ -236,20 +265,63 @@ func BuildBackupRangeAndSchema( } if backupSchemas.Len() == 0 { - return nil, nil, errors.New("nothing to backup") + log.Info("nothing to backup") + return nil, nil, nil } return ranges, backupSchemas, nil } +// GetBackupDDLJobs returns the ddl jobs that were done in (lastBackupTS, backupTS]. +func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*model.Job, error) { + snapMeta, err := dom.GetSnapshotMeta(backupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSnapMeta, err := dom.GetSnapshotMeta(lastBackupTS) + if err != nil { + return nil, errors.Trace(err) + } + lastSchemaVersion, err := lastSnapMeta.GetSchemaVersion() + if err != nil { + return nil, errors.Trace(err) + } + allJobs := make([]*model.Job, 0) + defaultJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.DefaultJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get default jobs", zap.Int("jobs", len(defaultJobs))) + allJobs = append(allJobs, defaultJobs...) + addIndexJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get add index jobs", zap.Int("jobs", len(addIndexJobs))) + allJobs = append(allJobs, addIndexJobs...) + historyJobs, err := snapMeta.GetAllHistoryDDLJobs() + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get history jobs", zap.Int("jobs", len(historyJobs))) + allJobs = append(allJobs, historyJobs...)
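+ // Keep only the jobs that reached Done or Synced state and whose binlog schema version is newer than the schema version at lastBackupTS, i.e. jobs completed within (lastBackupTS, backupTS].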
+ + completedJobs := make([]*model.Job, 0) + for _, job := range allJobs { + if (job.State == model.JobStateDone || job.State == model.JobStateSynced) && + (job.BinlogInfo != nil && job.BinlogInfo.SchemaVersion > lastSchemaVersion) { + completedJobs = append(completedJobs, job) + } + } + log.Debug("get completed jobs", zap.Int("jobs", len(completedJobs))) + return completedJobs, nil +} + + // BackupRanges make a backup of the given key ranges. func (bc *Client) BackupRanges( ctx context.Context, - ranges []Range, - lastBackupTS uint64, - backupTS uint64, - rateLimit uint64, - concurrency uint32, - updateCh chan<- struct{}, + ranges []rtree.Range, + req kvproto.BackupRequest, + updateCh glue.Progress, ) error { start := time.Now() defer func() { @@ -262,8 +334,8 @@ func (bc *Client) BackupRanges( defer cancel() go func() { for _, r := range ranges { - err := bc.backupRange( - ctx, r.StartKey, r.EndKey, lastBackupTS, backupTS, rateLimit, concurrency, updateCh) + err := bc.BackupRange( + ctx, r.StartKey, r.EndKey, req, updateCh) if err != nil { errCh <- err return @@ -272,17 +344,24 @@ func (bc *Client) BackupRanges( close(errCh) }() - // Check GC safepoint every 30s. - t := time.NewTicker(time.Second * 30) + // Check GC safepoint every 5s. + t := time.NewTicker(time.Second * 5) defer t.Stop() finished := false for { - err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), backupTS) + err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.EndVersion) if err != nil { log.Error("check GC safepoint failed", zap.Error(err)) return err } + if req.StartVersion > 0 { + err = CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.StartVersion) + if err != nil { + log.Error("check GC safepoint for last backup ts failed", zap.Error(err)) + return err + } + } if finished { // Return error (if there is any) before finishing backup. return err @@ -302,15 +381,12 @@ func (bc *Client) BackupRanges( } } -// backupRange make a backup of the given key range. -func (bc *Client) backupRange( +// BackupRange makes a backup of the given key range. +func (bc *Client) BackupRange( ctx context.Context, startKey, endKey []byte, - lastBackupTS uint64, - backupTS uint64, - rateLimit uint64, - concurrency uint32, - updateCh chan<- struct{}, + req kvproto.BackupRequest, + updateCh glue.Progress, ) (err error) { start := time.Now() defer func() { @@ -320,65 +396,70 @@ if err != nil { summary.CollectFailureUnit(key, err) } else { - summary.CollectSuccessUnit(key, elapsed) + summary.CollectSuccessUnit(key, 1, elapsed) } }() log.Info("backup started", zap.Binary("StartKey", startKey), zap.Binary("EndKey", endKey), - zap.Uint64("RateLimit", rateLimit), - zap.Uint32("Concurrency", concurrency)) + zap.Uint64("RateLimit", req.RateLimit), + zap.Uint32("Concurrency", req.Concurrency)) ctx, cancel := context.WithCancel(ctx) defer cancel() var allStores []*metapb.Store - allStores, err = bc.mgr.GetPDClient().GetAllStores(ctx, pd.WithExcludeTombstone()) + allStores, err = conn.GetAllTiKVStores(ctx, bc.mgr.GetPDClient(), conn.SkipTiFlash) if err != nil { return errors.Trace(err) } - req := backup.BackupRequest{ - ClusterId: bc.clusterID, - StartKey: startKey, - EndKey: endKey, - StartVersion: lastBackupTS, - EndVersion: backupTS, - StorageBackend: bc.backend, - RateLimit: rateLimit, - Concurrency: concurrency, - } + req.ClusterId = bc.clusterID + req.StartKey = startKey + req.EndKey = endKey + req.StorageBackend = bc.backend + push := newPushDown(ctx, bc.mgr, len(allStores)) - var results RangeTree + var results rtree.RangeTree results, err = push.pushBackup(req, allStores, updateCh) if err != nil { return err } - log.Info("finish backup push down", zap.Int("Ok", results.len())) + log.Info("finish backup push down", zap.Int("Ok", results.Len())) // Find and backup remaining ranges. // TODO: test fine grained backup. err = bc.fineGrainedBackup( - ctx, startKey, endKey, lastBackupTS, - backupTS, rateLimit, concurrency, results, updateCh) + ctx, startKey, endKey, req.StartVersion, + req.EndVersion, req.RateLimit, req.Concurrency, results, updateCh) if err != nil { return err } - bc.backupMeta.StartVersion = lastBackupTS - bc.backupMeta.EndVersion = backupTS - log.Info("backup time range", - zap.Reflect("StartVersion", lastBackupTS), - zap.Reflect("EndVersion", backupTS)) + bc.backupMeta.StartVersion = req.StartVersion + bc.backupMeta.EndVersion = req.EndVersion + bc.backupMeta.IsRawKv = req.IsRawKv + if req.IsRawKv { + bc.backupMeta.RawRanges = append(bc.backupMeta.RawRanges, + &kvproto.RawRange{StartKey: startKey, EndKey: endKey, Cf: req.Cf}) + log.Info("backup raw ranges", + zap.ByteString("startKey", startKey), + zap.ByteString("endKey", endKey), + zap.String("cf", req.Cf)) + } else { + log.Info("backup time range", + zap.Reflect("StartVersion", req.StartVersion), + zap.Reflect("EndVersion", req.EndVersion)) + } - results.tree.Ascend(func(i btree.Item) bool { - r := i.(*Range) + results.Ascend(func(i btree.Item) bool { + r := i.(*rtree.Range) bc.backupMeta.Files = append(bc.backupMeta.Files, r.Files...) return true }) // Check if there are duplicated files.
- results.checkDupFiles() + checkDupFiles(&results) return nil } @@ -416,21 +497,21 @@ func (bc *Client) fineGrainedBackup( backupTS uint64, rateLimit uint64, concurrency uint32, - rangeTree RangeTree, - updateCh chan<- struct{}, + rangeTree rtree.RangeTree, + updateCh glue.Progress, ) error { bo := tikv.NewBackoffer(ctx, backupFineGrainedMaxBackoff) for { // Step1, check whether there is any incomplete range - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) if len(incomplete) == 0 { return nil } log.Info("start fine grained backup", zap.Int("incomplete", len(incomplete))) // Step2, retry backup on incomplete range - respCh := make(chan *backup.BackupResponse, 4) + respCh := make(chan *kvproto.BackupResponse, 4) errCh := make(chan error, 4) - retry := make(chan Range, 4) + retry := make(chan rtree.Range, 4) max := &struct { ms int @@ -489,10 +570,10 @@ func (bc *Client) fineGrainedBackup( zap.Binary("StartKey", resp.StartKey), zap.Binary("EndKey", resp.EndKey), ) - rangeTree.put(resp.StartKey, resp.EndKey, resp.Files) + rangeTree.Put(resp.StartKey, resp.EndKey, resp.Files) // Update progress - updateCh <- struct{}{} + updateCh.Inc() } } @@ -514,15 +595,15 @@ func (bc *Client) fineGrainedBackup( func onBackupResponse( bo *tikv.Backoffer, lockResolver *tikv.LockResolver, - resp *backup.BackupResponse, -) (*backup.BackupResponse, int, error) { + resp *kvproto.BackupResponse, +) (*kvproto.BackupResponse, int, error) { log.Debug("onBackupResponse", zap.Reflect("resp", resp)) if resp.Error == nil { return resp, 0, nil } backoffMs := 0 switch v := resp.Error.Detail.(type) { - case *backup.Error_KvError: + case *kvproto.Error_KvError: if lockErr := v.KvError.Locked; lockErr != nil { // Try to resolve lock. log.Warn("backup occur kv error", zap.Reflect("error", v)) @@ -540,7 +621,7 @@ func onBackupResponse( log.Error("unexpect kv error", zap.Reflect("KvError", v.KvError)) return nil, backoffMs, errors.Errorf("onBackupResponse error %v", v) - case *backup.Error_RegionError: + case *kvproto.Error_RegionError: regionErr := v.RegionError // Ignore following errors. if !(regionErr.EpochNotMatch != nil || @@ -558,7 +639,7 @@ func onBackupResponse( // TODO: a better backoff. backoffMs = 1000 /* 1s */ return nil, backoffMs, nil - case *backup.Error_ClusterIdError: + case *kvproto.Error_ClusterIdError: log.Error("backup occur cluster ID error", zap.Reflect("error", v)) err := errors.Errorf("%v", resp.Error) @@ -574,12 +655,12 @@ func onBackupResponse( func (bc *Client) handleFineGrained( ctx context.Context, bo *tikv.Backoffer, - rg Range, + rg rtree.Range, lastBackupTS uint64, backupTS uint64, rateLimit uint64, concurrency uint32, - respCh chan<- *backup.BackupResponse, + respCh chan<- *kvproto.BackupResponse, ) (int, error) { leader, pderr := bc.findRegionLeader(ctx, rg.StartKey) if pderr != nil { @@ -588,7 +669,7 @@ func (bc *Client) handleFineGrained( storeID := leader.GetStoreId() max := 0 - req := backup.BackupRequest{ + req := kvproto.BackupRequest{ ClusterId: bc.clusterID, StartKey: rg.StartKey, // TODO: the range may cross region. EndKey: rg.EndKey, @@ -607,7 +688,7 @@ func (bc *Client) handleFineGrained( err = SendBackup( ctx, storeID, client, req, // Handle responses with the same backoffer. 
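(SendBackup, whose signature is re-typed in the hunk below, drives a streaming RPC and hands every response to the respFn callback, so the caller controls both collection and early termination. A hypothetical caller is sketched below; it assumes it lives inside pkg/backup with context and pingcap/errors imported, and that kvproto aliases github.com/pingcap/kvproto/pkg/backup as elsewhere in this diff.)

```go
// collectBackup is a hypothetical wrapper around SendBackup: returning a
// non-nil error from the callback aborts the stream, returning nil keeps
// consuming responses.
func collectBackup(
	ctx context.Context,
	storeID uint64,
	client kvproto.BackupClient,
	req kvproto.BackupRequest,
) ([]*kvproto.BackupResponse, error) {
	var collected []*kvproto.BackupResponse
	err := SendBackup(ctx, storeID, client, req,
		func(resp *kvproto.BackupResponse) error {
			if resp.GetError() != nil {
				return errors.Errorf("store %d returned %v", storeID, resp.GetError())
			}
			collected = append(collected, resp)
			return nil
		})
	return collected, err
}
```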
-		func(resp *backup.BackupResponse) error {
+		func(resp *kvproto.BackupResponse) error {
 			response, backoffMs, err1 := onBackupResponse(bo, lockResolver, resp)
 			if err1 != nil {
@@ -632,9 +713,9 @@ func (bc *Client) handleFineGrained(
 func SendBackup(
 	ctx context.Context,
 	storeID uint64,
-	client backup.BackupClient,
-	req backup.BackupRequest,
-	respFn func(*backup.BackupResponse) error,
+	client kvproto.BackupClient,
+	req kvproto.BackupRequest,
+	respFn func(*kvproto.BackupResponse) error,
 ) error {
 	log.Info("try backup", zap.Any("backup request", req))
 	ctx, cancel := context.WithCancel(ctx)
@@ -701,8 +782,8 @@ func (bc *Client) FastChecksum() (bool, error) {
 			totalBytes += file.TotalBytes
 		}
-		summary.CollectSuccessUnit(summary.TotalKV, totalKvs)
-		summary.CollectSuccessUnit(summary.TotalBytes, totalBytes)
+		summary.CollectSuccessUnit(summary.TotalKV, 1, totalKvs)
+		summary.CollectSuccessUnit(summary.TotalBytes, 1, totalBytes)
 		if schema.Crc64Xor == checksum && schema.TotalKvs == totalKvs && schema.TotalBytes == totalBytes {
 			log.Info("fast checksum success", zap.Stringer("db", dbInfo.Name), zap.Stringer("table", tblInfo.Name))
diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go
index e3ad8130b..67ffd6a3e 100644
--- a/pkg/backup/client_test.go
+++ b/pkg/backup/client_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package backup
 
 import (
@@ -53,7 +55,7 @@ func (r *testBackup) TestGetTS(c *C) {
 	// timeago not work
 	expectedDuration := 0
 	currentTs := time.Now().UnixNano() / int64(time.Millisecond)
-	ts, err := r.backupClient.GetTS(r.ctx, 0)
+	ts, err := r.backupClient.GetTS(r.ctx, 0, 0)
 	c.Assert(err, IsNil)
 	pdTs := oracle.ExtractPhysical(ts)
 	duration := int(currentTs - pdTs)
@@ -63,7 +65,7 @@ func (r *testBackup) TestGetTS(c *C) {
 	// timeago = "1.5m"
 	expectedDuration = 90000
 	currentTs = time.Now().UnixNano() / int64(time.Millisecond)
-	ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second)
+	ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second, 0)
 	c.Assert(err, IsNil)
 	pdTs = oracle.ExtractPhysical(ts)
 	duration = int(currentTs - pdTs)
@@ -71,11 +73,11 @@ func (r *testBackup) TestGetTS(c *C) {
 	c.Assert(duration, Less, expectedDuration+deviation)
 
 	// timeago = "-1m"
-	_, err = r.backupClient.GetTS(r.ctx, -time.Minute)
+	_, err = r.backupClient.GetTS(r.ctx, -time.Minute, 0)
 	c.Assert(err, ErrorMatches, "negative timeago is not allowed")
 
 	// timeago = "1000000h" overflows
-	_, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour)
+	_, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour, 0)
 	c.Assert(err, ErrorMatches, "backup ts overflow.*")
 
 	// timeago = "10h" exceed GCSafePoint
@@ -84,11 +86,17 @@ func (r *testBackup) TestGetTS(c *C) {
 	now := oracle.ComposeTS(p, l)
 	_, err = r.backupClient.mgr.GetPDClient().UpdateGCSafePoint(r.ctx, now)
 	c.Assert(err, IsNil)
-	_, err = r.backupClient.GetTS(r.ctx, 10*time.Hour)
-	// mocktikv pdClient.UpdateGCSafePoint return 0 forever
-	// so this error won't happen
-	// c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+")
+	_, err = r.backupClient.GetTS(r.ctx, 10*time.Hour, 0)
+	// c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+")
+	// because the PD 3.1 client changed the behaviour of mockClient GetTS, the above error won't happen
+	c.Assert(err, IsNil)
+
+	// timeago and backupts both exist; when both are set, backupts wins
+	backupts := oracle.ComposeTS(p+10, l)
+	ts, err = r.backupClient.GetTS(r.ctx, time.Minute, backupts)
 	c.Assert(err, IsNil)
+	c.Assert(ts, Equals, backupts)
+
 }
 
 func (r *testBackup) TestBuildTableRange(c
*C) { diff --git a/pkg/backup/metrics.go b/pkg/backup/metrics.go index fb982cc24..67d5fe1e5 100644 --- a/pkg/backup/metrics.go +++ b/pkg/backup/metrics.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/push.go b/pkg/backup/push.go index 23c4f01d4..d329f7088 100644 --- a/pkg/backup/push.go +++ b/pkg/backup/push.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -9,6 +11,9 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/rtree" ) // pushDown warps a backup task. @@ -34,10 +39,10 @@ func newPushDown(ctx context.Context, mgr ClientMgr, cap int) *pushDown { func (push *pushDown) pushBackup( req backup.BackupRequest, stores []*metapb.Store, - updateCh chan<- struct{}, -) (RangeTree, error) { + updateCh glue.Progress, +) (rtree.RangeTree, error) { // Push down backup tasks to all tikv instances. - res := newRangeTree() + res := rtree.NewRangeTree() wg := new(sync.WaitGroup) for _, s := range stores { storeID := s.GetId() @@ -82,11 +87,11 @@ func (push *pushDown) pushBackup( } if resp.GetError() == nil { // None error means range has been backuped successfully. - res.put( + res.Put( resp.GetStartKey(), resp.GetEndKey(), resp.GetFiles()) // Update progress - updateCh <- struct{}{} + updateCh.Inc() } else { errPb := resp.GetError() switch v := errPb.Detail.(type) { diff --git a/pkg/backup/safe_point.go b/pkg/backup/safe_point.go index bb73bc7d9..9fb7e5d74 100644 --- a/pkg/backup/safe_point.go +++ b/pkg/backup/safe_point.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -5,7 +7,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" "go.uber.org/zap" ) diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 1bea9e210..f2935b77c 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -5,21 +7,21 @@ import ( "sync" . "github.com/pingcap/check" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testSaftPointSuite{}) type testSaftPointSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testSaftPointSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/backup/schema.go b/pkg/backup/schema.go index 66e4beec7..73a62477d 100644 --- a/pkg/backup/schema.go +++ b/pkg/backup/schema.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package backup import ( @@ -16,6 +18,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -65,7 +68,7 @@ func (pending *Schemas) Start( store kv.Storage, backupTS uint64, concurrency uint, - updateCh chan<- struct{}, + updateCh glue.Progress, ) { workerPool := utils.NewWorkerPool(concurrency, "Schemas") go func() { @@ -80,7 +83,7 @@ func (pending *Schemas) Start( if pending.skipChecksum { pending.backupSchemaCh <- schema - updateCh <- struct{}{} + updateCh.Inc() return } @@ -108,7 +111,7 @@ func (pending *Schemas) Start( zap.Duration("take", time.Since(start))) pending.backupSchemaCh <- schema - updateCh <- struct{}{} + updateCh.Inc() }) } pending.wg.Wait() diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index f657310bf..98173dd55 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -1,26 +1,29 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( "context" "math" + "sync/atomic" . "github.com/pingcap/check" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testBackupSchemaSuite{}) type testBackupSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackupSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -28,6 +31,24 @@ func (s *testBackupSchemaSuite) TearDownSuite(c *C) { testleak.AfterTest(c)() } +type simpleProgress struct { + counter int64 +} + +func (sp *simpleProgress) Inc() { + atomic.AddInt64(&sp.counter, 1) +} + +func (sp *simpleProgress) Close() {} + +func (sp *simpleProgress) reset() { + atomic.StoreInt64(&sp.counter, 0) +} + +func (sp *simpleProgress) get() int64 { + return atomic.LoadInt64(&sp.counter) +} + func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(s.mock.Start(), IsNil) defer s.mock.Stop() @@ -41,7 +62,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) // Database is not exist. @@ -51,15 +72,15 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) - // Empty databse. + // Empty database. 
noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) tk.MustExec("use test") @@ -71,13 +92,13 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 1) - updateCh := make(chan struct{}, 2) + updateCh := new(simpleProgress) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) schemas, err := backupSchemas.finishTableChecksum() - <-updateCh + c.Assert(updateCh.get(), Equals, int64(1)) c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) @@ -91,13 +112,13 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 2) + updateCh.reset() backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 2, updateCh) schemas, err = backupSchemas.finishTableChecksum() - <-updateCh - <-updateCh + c.Assert(updateCh.get(), Equals, int64(2)) c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 2) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) diff --git a/pkg/checksum/executor.go b/pkg/checksum/executor.go index 94885a914..1070fba62 100644 --- a/pkg/checksum/executor.go +++ b/pkg/checksum/executor.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package checksum import ( @@ -61,7 +63,7 @@ func buildChecksumRequest( reqs := make([]*kv.Request, 0, (len(newTable.Indices)+1)*(len(partDefs)+1)) var oldTableID int64 if oldTable != nil { - oldTableID = oldTable.Schema.ID + oldTableID = oldTable.Info.ID } rs, err := buildRequest(newTable, newTable.ID, oldTable, oldTableID, startTS) if err != nil { @@ -72,7 +74,7 @@ func buildChecksumRequest( for _, partDef := range partDefs { var oldPartID int64 if oldTable != nil { - for _, oldPartDef := range oldTable.Schema.Partition.Definitions { + for _, oldPartDef := range oldTable.Info.Partition.Definitions { if oldPartDef.Name == partDef.Name { oldPartID = oldPartDef.ID } @@ -108,7 +110,7 @@ func buildRequest( } var oldIndexInfo *model.IndexInfo if oldTable != nil { - for _, oldIndex := range oldTable.Schema.Indices { + for _, oldIndex := range oldTable.Info.Indices { if oldIndex.Name == indexInfo.Name { oldIndexInfo = oldIndex break @@ -117,7 +119,7 @@ func buildRequest( if oldIndexInfo == nil { log.Panic("index not found", zap.Reflect("table", tableInfo), - zap.Reflect("oldTable", oldTable.Schema), + zap.Reflect("oldTable", oldTable.Info), zap.Stringer("index", indexInfo.Name)) } } diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index ca68628e2..43c90761d 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package checksum import ( @@ -12,6 +14,7 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) @@ -22,12 +25,12 @@ func TestT(t *testing.T) { var _ = Suite(&testChecksumSuite{}) type testChecksumSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testChecksumSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -61,7 +64,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { c.Assert(len(exe1.reqs), Equals, 1) resp, err := exe1.Execute(context.TODO(), s.mock.Storage.GetClient(), func() {}) c.Assert(err, IsNil) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(resp.Checksum, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalKvs, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalBytes, Equals, uint64(1), Commentf("%v", resp)) @@ -83,7 +86,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { // Test rewrite rules tk.MustExec("alter table t1 add index i2(a);") tableInfo1 = s.getTableInfo(c, "test", "t1") - oldTable := utils.Table{Schema: tableInfo1} + oldTable := utils.Table{Info: tableInfo1} exe2, err = NewExecutorBuilder(tableInfo2, math.MaxUint64). SetOldTable(&oldTable).Build() c.Assert(err, IsNil) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 892a56d8c..ed0813645 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -1,8 +1,11 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+
 package conn
 
 import (
 	"bytes"
 	"context"
+	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -18,15 +21,18 @@ import (
 	"github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/log"
-	pd "github.com/pingcap/pd/client"
+	pd "github.com/pingcap/pd/v3/client"
 	"github.com/pingcap/tidb/domain"
-	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/util/codec"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
+
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/utils"
 )
 
 const (
@@ -34,6 +40,7 @@
 	clusterVersionPrefix = "pd/api/v1/config/cluster-version"
 	regionCountPrefix    = "pd/api/v1/stats/region"
 	schdulerPrefix       = "pd/api/v1/schedulers"
+	maxMsgSize           = int(128 * utils.MB) // pd.ScanRegion may return a large response
 )
 
 // Mgr manages connections to a TiDB cluster.
@@ -43,12 +50,14 @@ type Mgr struct {
 		addrs []string
 		cli   *http.Client
 	}
+	tlsConf *tls.Config
 	dom     *domain.Domain
 	storage tikv.Storage
 	grpcClis struct {
 		mu   sync.Mutex
 		clis map[uint64]*grpc.ClientConn
 	}
+	ownsStorage bool
 }
 
 type pdHTTPRequest func(context.Context, string, string, *http.Client, string, io.Reader) ([]byte, error)
 
 func pdRequest(
 	ctx context.Context,
 	addr string, prefix string,
 	cli *http.Client, method string, body io.Reader) ([]byte, error) {
-	if addr != "" && !strings.HasPrefix("http", addr) {
-		addr = "http://" + addr
-	}
 	u, err := url.Parse(addr)
 	if err != nil {
 		return nil, errors.Trace(err)
@@ -86,13 +92,90 @@ func pdRequest(
 	return r, nil
 }
 
+// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV
+// store (e.g. TiFlash store) is found.
+type StoreBehavior uint8
+
+const (
+	// ErrorOnTiFlash causes GetAllTiKVStores to return an error when the
+	// store is found to be a TiFlash node.
+	ErrorOnTiFlash StoreBehavior = 0
+	// SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to
+	// be a TiFlash node.
+	SkipTiFlash StoreBehavior = 1
+	// TiFlashOnly causes GetAllTiKVStores to skip every store that is not a
+	// TiFlash node.
+	TiFlashOnly StoreBehavior = 2
+)
+
+// GetAllTiKVStores returns all TiKV stores registered to the PD client,
+// excluding tombstones; how stores labelled `engine=tiflash` are treated
+// is decided by storeBehavior.
+func GetAllTiKVStores(
+	ctx context.Context,
+	pdClient pd.Client,
+	storeBehavior StoreBehavior,
+) ([]*metapb.Store, error) {
+	// get all live stores.
+	stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone())
+	if err != nil {
+		return nil, err
+	}
+
+	// filter the stores according to storeBehavior.
+	j := 0
+skipStore:
+	for _, store := range stores {
+		var isTiFlash bool
+		for _, label := range store.Labels {
+			if label.Key == "engine" && label.Value == "tiflash" {
+				if storeBehavior == SkipTiFlash {
+					continue skipStore
+				} else if storeBehavior == ErrorOnTiFlash {
+					return nil, errors.Errorf(
+						"cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address)
+				}
+				isTiFlash = true
+			}
+		}
+		if !isTiFlash && storeBehavior == TiFlashOnly {
+			continue skipStore
+		}
+		stores[j] = store
+		j++
+	}
+	return stores[:j], nil
+}
+
 // NewMgr creates a new Mgr.
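(Since GetAllTiKVStores is new in this change, a short usage sketch may help. printTiKVStores is a hypothetical caller; the call itself and the three behaviors are exactly as defined above: SkipTiFlash silently drops TiFlash nodes, ErrorOnTiFlash fails fast on them, and TiFlashOnly inverts the filter.)

```go
package conn

import (
	"context"
	"fmt"

	pd "github.com/pingcap/pd/v3/client"
)

// printTiKVStores lists the stores a backup or restore would target.
func printTiKVStores(ctx context.Context, pdClient pd.Client) error {
	stores, err := GetAllTiKVStores(ctx, pdClient, SkipTiFlash)
	if err != nil {
		return err
	}
	for _, s := range stores {
		fmt.Printf("store %d at %s\n", s.Id, s.Address)
	}
	return nil
}
```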
-func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, error) {
+func NewMgr(
+	ctx context.Context,
+	g glue.Glue,
+	pdAddrs string,
+	storage tikv.Storage,
+	tlsConf *tls.Config,
+	securityOption pd.SecurityOption,
+	storeBehavior StoreBehavior,
+) (*Mgr, error) {
 	addrs := strings.Split(pdAddrs, ",")
 	failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs)
 	cli := &http.Client{Timeout: 30 * time.Second}
+	if tlsConf != nil {
+		transport := http.DefaultTransport.(*http.Transport).Clone()
+		transport.TLSClientConfig = tlsConf
+		cli.Transport = transport
+	}
+
+	processedAddrs := make([]string, 0, len(addrs))
 	for _, addr := range addrs {
+		if addr != "" && !strings.HasPrefix(addr, "http") {
+			if tlsConf != nil {
+				addr = "https://" + addr
+			} else {
+				addr = "http://" + addr
+			}
+		}
+		processedAddrs = append(processedAddrs, addr)
 		_, failure = pdRequest(ctx, addr, clusterVersionPrefix, cli, http.MethodGet, nil)
 		// TODO need check cluster version >= 3.1 when br release
 		if failure == nil {
@@ -103,7 +186,12 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er
 		return nil, errors.Annotatef(failure, "pd address (%s) not available, please check network", pdAddrs)
 	}
 
-	pdClient, err := pd.NewClient(addrs, pd.SecurityOption{})
+	maxCallMsgSize := []grpc.DialOption{
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)),
+		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(maxMsgSize)),
+	}
+	pdClient, err := pd.NewClient(
+		addrs, securityOption, pd.WithGRPCDialOptions(maxCallMsgSize...))
 	if err != nil {
 		log.Error("fail to create pd client", zap.Error(err))
 		return nil, err
@@ -111,7 +199,7 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er
 	log.Info("new mgr", zap.String("pdAddrs", pdAddrs))
 
 	// Check live tikv.
-	stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone())
+	stores, err := GetAllTiKVStores(ctx, pdClient, storeBehavior)
 	if err != nil {
 		log.Error("fail to get store", zap.Error(err))
 		return nil, err
@@ -130,17 +218,19 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er
 		return nil, errors.Errorf("tikv cluster not health %+v", stores)
 	}
 
-	dom, err := session.BootstrapSession(storage)
+	dom, err := g.GetDomain(storage)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
 
 	mgr := &Mgr{
-		pdClient: pdClient,
-		storage:  storage,
-		dom:      dom,
+		pdClient:    pdClient,
+		storage:     storage,
+		dom:         dom,
+		tlsConf:     tlsConf,
+		ownsStorage: g.OwnsStorage(),
 	}
-	mgr.pdHTTP.addrs = addrs
+	mgr.pdHTTP.addrs = processedAddrs
 	mgr.pdHTTP.cli = cli
 	mgr.grpcClis.clis = make(map[uint64]*grpc.ClientConn)
 	return mgr, nil
@@ -216,6 +306,9 @@ func (mgr *Mgr) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.Cl
 		return nil, errors.Trace(err)
 	}
 	opt := grpc.WithInsecure()
+	if mgr.tlsConf != nil {
+		opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf))
+	}
 	ctx, cancel := context.WithTimeout(ctx, dialTimeout)
 	keepAlive := 10
 	keepAliveTimeout := 3
@@ -274,6 +367,11 @@ func (mgr *Mgr) GetTiKV() tikv.Storage {
 	return mgr.storage
 }
 
+// GetTLSConfig returns the TLS config.
+func (mgr *Mgr) GetTLSConfig() *tls.Config {
+	return mgr.tlsConf
+}
+
 // GetLockResolver gets the LockResolver.
 func (mgr *Mgr) GetLockResolver() *tikv.LockResolver {
 	return mgr.storage.GetLockResolver()
@@ -354,9 +452,14 @@ func (mgr *Mgr) Close() {
 	// Gracefully shutdown domain so it does not affect other TiDB DDL.
// Must close domain before closing storage, otherwise it gets stuck forever. - mgr.dom.Close() + if mgr.ownsStorage { + if mgr.dom != nil { + mgr.dom.Close() + } + + atomic.StoreUint32(&tikv.ShuttingDown, 1) + mgr.storage.Close() + } - atomic.StoreUint32(&tikv.ShuttingDown, 1) - mgr.storage.Close() mgr.pdClient.Close() } diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index f51a6df3d..ae7f2b0a8 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package conn import ( @@ -13,8 +15,9 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/pd/server/core" - "github.com/pingcap/pd/server/statistics" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/pd/v3/server/core" + "github.com/pingcap/pd/v3/server/statistics" "github.com/pingcap/tidb/util/codec" ) @@ -151,3 +154,102 @@ func (s *testClientSuite) TestRegionCount(c *C) { c.Assert(err, IsNil) c.Assert(resp, Equals, 2) } + +type fakePDClient struct { + pd.Client + stores []*metapb.Store +} + +func (fpdc fakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([]*metapb.Store, error) { + return append([]*metapb.Store{}, fpdc.stores...), nil +} + +func (s *testClientSuite) TestGetAllTiKVStores(c *C) { + testCases := []struct { + stores []*metapb.Store + storeBehavior StoreBehavior + expectedStores map[uint64]int + expectedError string + }{ + { + stores: []*metapb.Store{ + {Id: 1}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + }, + storeBehavior: ErrorOnTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, 
Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: TiFlashOnly, + expectedStores: map[uint64]int{2: 1, 5: 1}, + }, + } + + for _, testCase := range testCases { + pdClient := fakePDClient{stores: testCase.stores} + stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) + if len(testCase.expectedError) != 0 { + c.Assert(err, ErrorMatches, testCase.expectedError) + continue + } + foundStores := make(map[uint64]int) + for _, store := range stores { + foundStores[store.Id]++ + } + c.Assert(foundStores, DeepEquals, testCase.expectedStores) + } +} diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go new file mode 100644 index 000000000..49bd5c3c1 --- /dev/null +++ b/pkg/glue/glue.go @@ -0,0 +1,43 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package glue + +import ( + "context" + + "github.com/pingcap/parser/model" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" +) + +// Glue is an abstraction of TiDB function calls used in BR. +type Glue interface { + GetDomain(store kv.Storage) (*domain.Domain, error) + CreateSession(store kv.Storage) (Session, error) + Open(path string, option pd.SecurityOption) (kv.Storage, error) + + // OwnsStorage returns whether the storage returned by Open() is owned + // If this method returns false, the connection manager will never close the storage. + OwnsStorage() bool + + StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) Progress +} + +// Session is an abstraction of the session.Session interface. +type Session interface { + Execute(ctx context.Context, sql string) error + CreateDatabase(ctx context.Context, schema *model.DBInfo) error + CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error + Close() +} + +// Progress is an interface recording the current execution progress. +type Progress interface { + // Inc increases the progress. This method must be goroutine-safe, and can + // be called from any goroutine. + Inc() + // Close marks the progress as 100% complete and that Inc() can no longer be + // called. + Close() +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go new file mode 100644 index 000000000..57bcad61a --- /dev/null +++ b/pkg/gluetidb/glue.go @@ -0,0 +1,92 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package gluetidb + +import ( + "context" + + "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" + + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/gluetikv" +) + +// Glue is an implementation of glue.Glue using a new TiDB session. 
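(Stepping back to the glue.Progress contract introduced above: Inc must be safe to call from any goroutine, and Close marks the progress complete. A minimal conforming implementation, in the spirit of the simpleProgress test helper earlier in this diff, might look like this.)

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/pingcap/br/pkg/glue"
)

// countingProgress satisfies glue.Progress with an atomic counter.
type countingProgress struct {
	counter int64
}

func (p *countingProgress) Inc()   { atomic.AddInt64(&p.counter, 1) }
func (p *countingProgress) Close() {}

// Compile-time check that the interface is satisfied.
var _ glue.Progress = (*countingProgress)(nil)

func main() {
	p := &countingProgress{}
	p.Inc()
	p.Inc()
	p.Close()
	fmt.Println("units done:", atomic.LoadInt64(&p.counter)) // 2
}
```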
+type Glue struct { + tikvGlue gluetikv.Glue +} + +type tidbSession struct { + se session.Session +} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return session.GetDomain(store) +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + se, err := session.CreateSession(store) + if err != nil { + return nil, err + } + return &tidbSession{se: se}, nil +} + +// Open implements glue.Glue +func (g Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return g.tikvGlue.Open(path, option) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} + +// StartProgress implements glue.Glue +func (g Glue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return g.tikvGlue.StartProgress(ctx, cmdName, total, redirectLog) +} + +// Execute implements glue.Session +func (gs *tidbSession) Execute(ctx context.Context, sql string) error { + _, err := gs.se.Execute(ctx, sql) + return err +} + +// CreateDatabase implements glue.Session +func (gs *tidbSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { + d := domain.GetDomain(gs.se).DDL() + schema = schema.Clone() + if len(schema.Charset) == 0 { + schema.Charset = mysql.DefaultCharset + } + return d.CreateSchemaWithInfo(gs.se, schema, ddl.OnExistIgnore, true) +} + +// CreateTable implements glue.Session +func (gs *tidbSession) CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error { + d := domain.GetDomain(gs.se).DDL() + + // Clone() does not clone partitions yet :( + table = table.Clone() + if table.Partition != nil { + newPartition := *table.Partition + newPartition.Definitions = append([]model.PartitionDefinition{}, table.Partition.Definitions...) + table.Partition = &newPartition + } + + return d.CreateTableWithInfo(gs.se, dbName, table, ddl.OnExistIgnore, true) +} + +// Close implements glue.Session +func (gs *tidbSession) Close() { + gs.se.Close() +} diff --git a/pkg/gluetikv/glue.go b/pkg/gluetikv/glue.go new file mode 100644 index 000000000..62f0967b5 --- /dev/null +++ b/pkg/gluetikv/glue.go @@ -0,0 +1,65 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package gluetikv + +import ( + "context" + + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv" + + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/utils" +) + +// Glue is an implementation of glue.Glue that accesses only TiKV without TiDB. 
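(The partition handling in gluetidb's CreateTable above exists because, per its own comment, TableInfo.Clone() does not deep-copy partition definitions at the time of this diff; mutating a shallow clone would corrupt the cached schema. A toy demonstration of that hazard, using stand-in types rather than the real model structs:)

```go
package main

import "fmt"

type partitionInfo struct{ definitions []string }

type tableInfo struct{ partition *partitionInfo }

// clone copies the struct but shares the nested partition data,
// analogous to the incomplete Clone() the diff works around.
func (t *tableInfo) clone() *tableInfo {
	c := *t
	return &c
}

func main() {
	orig := &tableInfo{partition: &partitionInfo{definitions: []string{"p0"}}}

	// A shallow clone aliases the partition definitions...
	shallow := orig.clone()
	shallow.partition.definitions[0] = "oops"
	fmt.Println(orig.partition.definitions[0]) // "oops": the original changed too

	// ...so, like CreateTable above, copy the partition struct and its
	// slice before mutating, leaving the original untouched.
	deep := orig.clone()
	newPart := *orig.partition
	newPart.definitions = append([]string{}, orig.partition.definitions...)
	deep.partition = &newPart
	deep.partition.definitions[0] = "safe"
	fmt.Println(orig.partition.definitions[0]) // still "oops", unaffected
}
```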
+type Glue struct{} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return nil, nil +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + return nil, nil +} + +// Open implements glue.Glue +func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + if option.CAPath != "" { + conf := config.GetGlobalConfig() + conf.Security.ClusterSSLCA = option.CAPath + conf.Security.ClusterSSLCert = option.CertPath + conf.Security.ClusterSSLKey = option.KeyPath + config.StoreGlobalConfig(conf) + } + return tikv.Driver{}.Open(path) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} + +// StartProgress implements glue.Glue +func (Glue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return progress{ch: utils.StartProgress(ctx, cmdName, total, redirectLog)} +} + +type progress struct { + ch chan<- struct{} +} + +// Inc implements glue.Progress +func (p progress) Inc() { + p.ch <- struct{}{} +} + +// Close implements glue.Progress +func (p progress) Close() { + close(p.ch) +} diff --git a/pkg/utils/mock_cluster.go b/pkg/mock/mock_cluster.go similarity index 90% rename from pkg/utils/mock_cluster.go rename to pkg/mock/mock_cluster.go index dc7b87c3c..a689ba669 100644 --- a/pkg/utils/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -1,4 +1,6 @@ -package utils +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package mock import ( "database/sql" @@ -14,8 +16,8 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" - "github.com/pingcap/pd/pkg/tempurl" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/pd/v3/pkg/tempurl" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" @@ -28,8 +30,8 @@ import ( var pprofOnce sync.Once -// MockCluster is mock tidb cluster, includes tikv and pd. -type MockCluster struct { +// Cluster is mock tidb cluster, includes tikv and pd. +type Cluster struct { *server.Server *mocktikv.Cluster mocktikv.MVCCStore @@ -40,8 +42,8 @@ type MockCluster struct { PDClient pd.Client } -// NewMockCluster create a new mock cluster. -func NewMockCluster() (*MockCluster, error) { +// NewCluster create a new mock cluster. +func NewCluster() (*Cluster, error) { pprofOnce.Do(func() { go func() { // Make sure pprof is registered. @@ -72,7 +74,7 @@ func NewMockCluster() (*MockCluster, error) { if err != nil { return nil, err } - return &MockCluster{ + return &Cluster{ Cluster: cluster, MVCCStore: mvccStore, Storage: storage, @@ -82,7 +84,7 @@ func NewMockCluster() (*MockCluster, error) { } // Start runs a mock cluster -func (mock *MockCluster) Start() error { +func (mock *Cluster) Start() error { statusURL, err := url.Parse(tempurl.Alloc()) if err != nil { return err @@ -124,7 +126,7 @@ func (mock *MockCluster) Start() error { } // Stop stops a mock cluster -func (mock *MockCluster) Stop() { +func (mock *Cluster) Stop() { if mock.Domain != nil { mock.Domain.Close() } diff --git a/pkg/mock/mock_cluster_test.go b/pkg/mock/mock_cluster_test.go new file mode 100644 index 000000000..1db0f5a8c --- /dev/null +++ b/pkg/mock/mock_cluster_test.go @@ -0,0 +1,29 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package mock + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testClusterSuite{}) + +type testClusterSuite struct { + mock *Cluster +} + +func (s *testClusterSuite) SetUpSuite(c *C) { + var err error + s.mock, err = NewCluster() + c.Assert(err, IsNil) +} + +func (s *testClusterSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testClusterSuite) TestSmoke(c *C) { + c.Assert(s.mock.Start(), IsNil) + s.mock.Stop() +} diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go index dae14e109..a84014c11 100644 --- a/pkg/restore/backoff.go +++ b/pkg/restore/backoff.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( @@ -11,21 +13,13 @@ import ( ) var ( - errNotLeader = errors.NewNoStackError("not leader") errEpochNotMatch = errors.NewNoStackError("epoch not match") errKeyNotInRegion = errors.NewNoStackError("key not in region") - errRegionNotFound = errors.NewNoStackError("region not found") - errResp = errors.NewNoStackError("response error") errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found") errRangeIsEmpty = errors.NewNoStackError("range is empty") errGrpc = errors.NewNoStackError("gRPC error") - - // TODO: add `error` field to `DownloadResponse` for distinguish the errors of gRPC - // and the errors of request - errBadFormat = errors.NewNoStackError("bad format") - errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix") - errFileCorrupted = errors.NewNoStackError("file corrupted") - errCannotRead = errors.NewNoStackError("cannot read externel storage") + errDownloadFailed = errors.NewNoStackError("download sst failed") + errIngestFailed = errors.NewNoStackError("ingest sst failed") ) const ( @@ -66,7 +60,7 @@ func newDownloadSSTBackoffer() utils.Backoffer { func (bo *importerBackoffer) NextBackoff(err error) time.Duration { switch errors.Cause(err) { - case errResp, errGrpc, errEpochNotMatch, errNotLeader: + case errGrpc, errEpochNotMatch, errIngestFailed: bo.delayTime = 2 * bo.delayTime bo.attempt-- case errRangeIsEmpty, errRewriteRuleNotFound: @@ -89,21 +83,21 @@ func (bo *importerBackoffer) Attempt() int { return bo.attempt } -type resetTSBackoffer struct { +type pdReqBackoffer struct { attempt int delayTime time.Duration maxDelayTime time.Duration } -func newResetTSBackoffer() utils.Backoffer { - return &resetTSBackoffer{ +func newPDReqBackoffer() utils.Backoffer { + return &pdReqBackoffer{ attempt: resetTsRetryTime, delayTime: resetTSWaitInterval, maxDelayTime: resetTSMaxWaitInterval, } } -func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { +func (bo *pdReqBackoffer) NextBackoff(err error) time.Duration { bo.delayTime = 2 * bo.delayTime bo.attempt-- if bo.delayTime > bo.maxDelayTime { @@ -112,6 +106,6 @@ func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { return bo.delayTime } -func (bo *resetTSBackoffer) Attempt() int { +func (bo *pdReqBackoffer) Attempt() int { return bo.attempt } diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go index 537f0980c..a07c0839b 100644 --- a/pkg/restore/backoff_test.go +++ b/pkg/restore/backoff_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( @@ -7,18 +9,19 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testBackofferSuite{}) type testBackofferSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackofferSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -34,7 +37,7 @@ func (s *testBackofferSuite) TestImporterBackoffer(c *C) { case 0: return errGrpc case 1: - return errResp + return errEpochNotMatch case 2: return errRangeIsEmpty } @@ -51,8 +54,8 @@ func (s *testBackofferSuite) TestImporterBackoffer(c *C) { } err = utils.WithRetry(context.Background(), func() error { defer func() { counter++ }() - return errResp + return errEpochNotMatch }, &backoffer) c.Assert(counter, Equals, 10) - c.Assert(err, Equals, errResp) + c.Assert(err, Equals, errEpochNotMatch) } diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 0e414572e..75feb6fda 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -1,26 +1,44 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( + "bytes" "context" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" "math" + "sort" + "strconv" "sync" "time" + "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/pd/v3/server/schedule/placement" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -34,38 +52,49 @@ type Client struct { ctx context.Context cancel context.CancelFunc - pdClient pd.Client - fileImporter FileImporter - workerPool *utils.WorkerPool - tableWorkerPool *utils.WorkerPool + pdClient pd.Client + toolClient SplitClient + fileImporter FileImporter + workerPool *utils.WorkerPool + tlsConf *tls.Config databases map[string]*utils.Database + ddlJobs []*model.Job backupMeta *backup.BackupMeta db *DB rateLimit uint64 isOnline bool + noSchema bool hasSpeedLimited bool + + restoreStores []uint64 + + storage storage.ExternalStorage + backend *backup.StorageBackend } // NewRestoreClient returns a new RestoreClient func NewRestoreClient( ctx context.Context, + g glue.Glue, pdClient pd.Client, store kv.Storage, + tlsConf *tls.Config, ) (*Client, error) { ctx, cancel := context.WithCancel(ctx) - db, err := NewDB(store) + db, err := NewDB(g, store) if err != nil { cancel() return nil, errors.Trace(err) } return &Client{ - ctx: ctx, - cancel: cancel, - pdClient: pdClient, - tableWorkerPool: utils.NewWorkerPool(128, "table"), - db: db, + ctx: ctx, + cancel: cancel, + pdClient: pdClient, + toolClient: NewSplitClient(pdClient, tlsConf), + db: db, + tlsConf: tlsConf, }, nil } @@ -74,6 +103,17 @@ func (rc *Client) SetRateLimit(rateLimit uint64) { rc.rateLimit = rateLimit } +// SetStorage 
set ExternalStorage for client +func (rc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { + var err error + rc.storage, err = storage.Create(ctx, backend, sendCreds) + if err != nil { + return err + } + rc.backend = backend + return nil +} + // GetPDClient returns a pd client. func (rc *Client) GetPDClient() pd.Client { return rc.pdClient @@ -86,26 +126,97 @@ func (rc *Client) IsOnline() bool { // Close a client func (rc *Client) Close() { - rc.db.Close() + // rc.db can be nil in raw kv mode. + if rc.db != nil { + rc.db.Close() + } rc.cancel() log.Info("Restore client closed") } // InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error { - databases, err := utils.LoadBackupTables(backupMeta) - if err != nil { - return errors.Trace(err) + if !backupMeta.IsRawKv { + databases, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return errors.Trace(err) + } + rc.databases = databases + + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } + rc.ddlJobs = ddlJobs } - rc.databases = databases rc.backupMeta = backupMeta + log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) - metaClient := NewSplitClient(rc.pdClient) - importClient := NewImportClient(metaClient) - rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) + metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) + importClient := NewImportClient(metaClient, rc.tlsConf) + rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, backupMeta.IsRawKv, rc.rateLimit) return nil } +// IsRawKvMode checks whether the backup data is in raw kv format, in which case transactional recover is forbidden. +func (rc *Client) IsRawKvMode() bool { + return rc.backupMeta.IsRawKv +} + +// GetFilesInRawRange gets all files that are in the given range or intersects with the given range. +func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error) { + if !rc.IsRawKvMode() { + return nil, errors.New("the backup data is not in raw kv mode") + } + + for _, rawRange := range rc.backupMeta.RawRanges { + // First check whether the given range is backup-ed. If not, we cannot perform the restore. + if rawRange.Cf != cf { + continue + } + + if (len(rawRange.EndKey) > 0 && bytes.Compare(startKey, rawRange.EndKey) >= 0) || + (len(endKey) > 0 && bytes.Compare(rawRange.StartKey, endKey) >= 0) { + // The restoring range is totally out of the current range. Skip it. + continue + } + + if bytes.Compare(startKey, rawRange.StartKey) < 0 || + utils.CompareEndKey(endKey, rawRange.EndKey) > 0 { + // Only partial of the restoring range is in the current backup-ed range. So the given range can't be fully + // restored. + return nil, errors.New("the given range to restore is not fully covered by the range that was backed up") + } + + // We have found the range that contains the given range. Find all necessary files. + files := make([]*backup.File, 0) + + for _, file := range rc.backupMeta.Files { + if file.Cf != cf { + continue + } + + if len(file.EndKey) > 0 && bytes.Compare(file.EndKey, startKey) < 0 { + // The file is before the range to be restored. + continue + } + if len(endKey) > 0 && bytes.Compare(endKey, file.StartKey) <= 0 { + // The file is after the range to be restored. 
+ // The specified endKey is exclusive, so when it equals to a file's startKey, the file is still skipped. + continue + } + + files = append(files, file) + } + + // There should be at most one backed up range that covers the restoring range. + return files, nil + } + + return nil, errors.New("no backup data in the range") +} + // SetConcurrency sets the concurrency of dbs tables files func (rc *Client) SetConcurrency(c uint) { rc.workerPool = utils.NewWorkerPool(c, "file") @@ -116,6 +227,11 @@ func (rc *Client) EnableOnline() { rc.isOnline = true } +// GetTLSConfig returns the tls config +func (rc *Client) GetTLSConfig() *tls.Config { + return rc.tlsConf +} + // GetTS gets a new timestamp from PD func (rc *Client) GetTS(ctx context.Context) (uint64, error) { p, l, err := rc.pdClient.GetTS(ctx) @@ -133,8 +249,23 @@ func (rc *Client) ResetTS(pdAddrs []string) error { i := 0 return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) - return utils.ResetTS(pdAddrs[idx], restoreTS) - }, newResetTSBackoffer()) + i++ + return utils.ResetTS(pdAddrs[idx], restoreTS, rc.tlsConf) + }, newPDReqBackoffer()) +} + +// GetPlacementRules return the current placement rules +func (rc *Client) GetPlacementRules(pdAddrs []string) ([]placement.Rule, error) { + var placementRules []placement.Rule + i := 0 + errRetry := utils.WithRetry(rc.ctx, func() error { + var err error + idx := i % len(pdAddrs) + i++ + placementRules, err = utils.GetPlacementRules(pdAddrs[idx], rc.tlsConf) + return err + }, newPDReqBackoffer()) + return placementRules, errRetry } // GetDatabases returns all databases. @@ -151,6 +282,11 @@ func (rc *Client) GetDatabase(name string) *utils.Database { return rc.databases[name] } +// GetDDLJobs returns ddl jobs +func (rc *Client) GetDDLJobs() []*model.Job { + return rc.ddlJobs +} + // GetTableSchema returns the schema of a table from TiDB. func (rc *Client) GetTableSchema( dom *domain.Domain, @@ -170,6 +306,10 @@ func (rc *Client) GetTableSchema( // CreateDatabase creates a database. func (rc *Client) CreateDatabase(db *model.DBInfo) error { + if rc.IsSkipCreateSQL() { + log.Info("skip create database", zap.Stringer("database", db.Name)) + return nil + } return rc.db.CreateDatabase(rc.ctx, db) } @@ -185,15 +325,19 @@ func (rc *Client) CreateTables( } newTables := make([]*model.TableInfo, 0, len(tables)) for _, table := range tables { - err := rc.db.CreateTable(rc.ctx, table) - if err != nil { - return nil, nil, err + if rc.IsSkipCreateSQL() { + log.Info("skip create table and alter autoIncID", zap.Stringer("table", table.Info.Name)) + } else { + err := rc.db.CreateTable(rc.ctx, table) + if err != nil { + return nil, nil, err + } } - newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Schema.Name) + newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Info.Name) if err != nil { return nil, nil, err } - rules := GetRewriteRules(newTableInfo, table.Schema, newTS) + rules := GetRewriteRules(newTableInfo, table.Info, newTS) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) 
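(One convention worth spelling out in the raw-range checks above: an empty end key means "unbounded", which is why every comparison guards len(...) > 0 before calling bytes.Compare, and why a helper like utils.CompareEndKey is needed at all. The sketch below uses a stand-in compareEndKey with the semantics the checks above imply; it is not the real utils implementation.)

```go
package main

import (
	"bytes"
	"fmt"
)

// compareEndKey orders end keys where an empty key means +infinity.
func compareEndKey(a, b []byte) int {
	switch {
	case len(a) == 0 && len(b) == 0:
		return 0
	case len(a) == 0: // a is unbounded
		return 1
	case len(b) == 0: // b is unbounded
		return -1
	default:
		return bytes.Compare(a, b)
	}
}

func main() {
	fmt.Println(compareEndKey([]byte("z"), nil)) // -1: "z" ends before +inf
	fmt.Println(compareEndKey(nil, []byte("z"))) // 1
}
```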
newTables = append(newTables, newTableInfo) @@ -201,9 +345,100 @@ func (rc *Client) CreateTables( return rewriteRules, newTables, nil } +// RemoveTiFlashReplica removes all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RemoveTiFlashReplica( + tables []*utils.Table, newTables []*model.TableInfo, placementRules []placement.Rule) error { + schemas := make([]*backup.Schema, 0, len(tables)) + var updateReplica bool + // must use new table id to search placement rules + // here newTables and tables must have same order + for i, table := range tables { + if rule := utils.SearchPlacementRule(newTables[i].ID, placementRules, placement.Learner); rule != nil { + table.TiFlashReplicas = rule.Count + updateReplica = true + } + tableData, err := json.Marshal(newTables[i]) + if err != nil { + return errors.Trace(err) + } + dbData, err := json.Marshal(table.Db) + if err != nil { + return errors.Trace(err) + } + schemas = append(schemas, &backup.Schema{ + Db: dbData, + Table: tableData, + Crc64Xor: table.Crc64Xor, + TotalKvs: table.TotalKvs, + TotalBytes: table.TotalBytes, + TiflashReplicas: uint32(table.TiFlashReplicas), + }) + } + + if updateReplica { + // Update backup meta + rc.backupMeta.Schemas = schemas + backupMetaData, err := proto.Marshal(rc.backupMeta) + if err != nil { + return errors.Trace(err) + } + backendURL := storage.FormatBackendURL(rc.backend) + log.Info("update backup meta", zap.Stringer("path", &backendURL)) + err = rc.storage.Write(rc.ctx, utils.SavedMetaFile, backupMetaData) + if err != nil { + return errors.Trace(err) + } + } + + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, 0) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// RecoverTiFlashReplica recovers all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RecoverTiFlashReplica(tables []*utils.Table) error { + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, table.TiFlashReplicas) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// ExecDDLs executes the queries of the ddl jobs. +func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { + // Sort the ddl jobs by schema version in ascending order. 
+ sort.Slice(ddlJobs, func(i, j int) bool { + return ddlJobs[i].BinlogInfo.SchemaVersion < ddlJobs[j].BinlogInfo.SchemaVersion + }) + + for _, job := range ddlJobs { + err := rc.db.ExecDDL(rc.ctx, job) + if err != nil { + return errors.Trace(err) + } + log.Info("execute ddl query", + zap.String("db", job.SchemaName), + zap.String("query", job.Query), + zap.Int64("historySchemaVersion", job.BinlogInfo.SchemaVersion)) + } + return nil +} + func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { - stores, err := rc.pdClient.GetAllStores(rc.ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(rc.ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return err } @@ -222,7 +457,8 @@ func (rc *Client) setSpeedLimit() error { func (rc *Client) RestoreFiles( files []*backup.File, rewriteRules *RewriteRules, - updateCh chan<- struct{}, + rejectStoreMap map[uint64]bool, + updateCh glue.Progress, ) (err error) { start := time.Now() defer func() { @@ -230,9 +466,7 @@ func (rc *Client) RestoreFiles( if err == nil { log.Info("Restore Files", zap.Int("files", len(files)), zap.Duration("take", elapsed)) - summary.CollectSuccessUnit("files", elapsed) - } else { - summary.CollectFailureUnit("files", err) + summary.CollectSuccessUnit("files", len(files), elapsed) } }() @@ -255,15 +489,16 @@ func (rc *Client) RestoreFiles( defer wg.Done() select { case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.fileImporter.Import(fileReplica, rewriteRules): - updateCh <- struct{}{} + errCh <- rc.ctx.Err() + case errCh <- rc.fileImporter.Import(fileReplica, rejectStoreMap, rewriteRules): + updateCh.Inc() } }) } - for range files { + for i := range files { err := <-errCh if err != nil { + summary.CollectFailureUnit(fmt.Sprintf("file:%d", i), err) rc.cancel() wg.Wait() log.Error( @@ -276,6 +511,63 @@ func (rc *Client) RestoreFiles( return nil } +// RestoreRaw tries to restore raw keys in the specified range. 
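(RestoreFiles above and RestoreRaw below share one fan-out shape: each import task is submitted to a bounded utils.WorkerPool and reports into a buffered error channel, so the collector can fail fast on the first error. A simplified sketch of that shape, using the NewWorkerPool/Apply API as it appears in this diff; it omits the cancel-and-wait cleanup the real code performs on failure.)

```go
package main

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/utils"
)

// restoreAll fans tasks out to a bounded pool and returns the first error.
func restoreAll(ctx context.Context, tasks []func() error) error {
	pool := utils.NewWorkerPool(4, "sketch")
	errCh := make(chan error, len(tasks)) // buffered: workers never block
	for _, task := range tasks {
		t := task
		pool.Apply(func() {
			select {
			case <-ctx.Done():
				errCh <- ctx.Err()
			case errCh <- t(): // run the task, send its result
			}
		})
	}
	for range tasks {
		if err := <-errCh; err != nil {
			return err // first failure wins
		}
	}
	return nil
}

func main() { fmt.Println(restoreAll(context.Background(), nil)) }
```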
+func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh glue.Progress) error { + start := time.Now() + defer func() { + elapsed := time.Since(start) + log.Info("Restore Raw", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Duration("take", elapsed)) + }() + errCh := make(chan error, len(files)) + wg := new(sync.WaitGroup) + defer close(errCh) + + err := rc.fileImporter.SetRawRange(startKey, endKey) + if err != nil { + + return errors.Trace(err) + } + + emptyRules := &RewriteRules{} + for _, file := range files { + wg.Add(1) + fileReplica := file + rc.workerPool.Apply( + func() { + defer wg.Done() + select { + case <-rc.ctx.Done(): + errCh <- rc.ctx.Err() + case errCh <- rc.fileImporter.Import(fileReplica, nil, emptyRules): + updateCh.Inc() + } + }) + } + for range files { + err := <-errCh + if err != nil { + rc.cancel() + wg.Wait() + log.Error( + "restore raw range failed", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Error(err), + ) + return err + } + } + log.Info( + "finish to restore raw range", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + ) + return nil +} + //SwitchToImportMode switch tikv cluster to import mode func (rc *Client) SwitchToImportMode(ctx context.Context) error { return rc.switchTiKVMode(ctx, import_sstpb.SwitchMode_Import) @@ -287,12 +579,17 @@ func (rc *Client) SwitchToNormalMode(ctx context.Context) error { } func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMode) error { - stores, err := rc.pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return errors.Trace(err) } + bfConf := backoff.DefaultConfig + bfConf.MaxDelay = time.Second * 3 for _, store := range stores { opt := grpc.WithInsecure() + if rc.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) + } gctx, cancel := context.WithTimeout(ctx, time.Second*5) keepAlive := 10 keepAliveTimeout := 3 @@ -300,15 +597,7 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo gctx, store.GetAddress(), opt, - grpc.WithConnectParams(grpc.ConnectParams{ - Backoff: backoff.Config{ - BaseDelay: time.Second, // Default was 1s. - Multiplier: 1.6, // Default - Jitter: 0.2, // Default - MaxDelay: 3 * time.Second, // Default was 120s. 
-			},
-			MinConnectTimeout: 5 * time.Second,
-		}),
+		grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}),
 		grpc.WithKeepaliveParams(keepalive.ClientParameters{
 			Time:    time.Duration(keepAlive) * time.Second,
 			Timeout: time.Duration(keepAliveTimeout) * time.Second,
@@ -341,7 +630,7 @@ func (rc *Client) ValidateChecksum(
 	kvClient kv.Client,
 	tables []*utils.Table,
 	newTables []*model.TableInfo,
-	updateCh chan<- struct{},
+	updateCh glue.Progress,
 ) error {
 	start := time.Now()
 	defer func() {
@@ -386,7 +675,7 @@ func (rc *Client) ValidateChecksum(
 				checksumResp.TotalBytes != table.TotalBytes {
 				log.Error("failed in validate checksum",
 					zap.String("database", table.Db.Name.L),
-					zap.String("table", table.Schema.Name.L),
+					zap.String("table", table.Info.Name.L),
 					zap.Uint64("origin tidb crc64", table.Crc64Xor),
 					zap.Uint64("calculated crc64", checksumResp.Checksum),
 					zap.Uint64("origin tidb total kvs", table.TotalKvs),
@@ -398,7 +687,7 @@ func (rc *Client) ValidateChecksum(
 				return
 			}
-			updateCh <- struct{}{}
+			updateCh.Inc()
 		})
 	}
 	wg.Wait()
@@ -413,8 +702,171 @@ func (rc *Client) ValidateChecksum(
 	return nil
 }
 
+const (
+	restoreLabelKey   = "exclusive"
+	restoreLabelValue = "restore"
+)
+
+// LoadRestoreStores loads the stores used to restore data.
+func (rc *Client) LoadRestoreStores(ctx context.Context) error {
+	if !rc.isOnline {
+		return nil
+	}
+
+	stores, err := rc.pdClient.GetAllStores(ctx)
+	if err != nil {
+		return err
+	}
+	for _, s := range stores {
+		if s.GetState() != metapb.StoreState_Up {
+			continue
+		}
+		for _, l := range s.GetLabels() {
+			if l.GetKey() == restoreLabelKey && l.GetValue() == restoreLabelValue {
+				rc.restoreStores = append(rc.restoreStores, s.GetId())
+				break
+			}
+		}
+	}
+	log.Info("load restore stores", zap.Uint64s("store-ids", rc.restoreStores))
+	return nil
+}
+
+// ResetRestoreLabels removes the exclusive labels of the restore stores.
+func (rc *Client) ResetRestoreLabels(ctx context.Context) error {
+	if !rc.isOnline {
+		return nil
+	}
+	log.Info("start resetting store labels")
+	return rc.toolClient.SetStoresLabel(ctx, rc.restoreStores, restoreLabelKey, "")
+}
+
+// SetupPlacementRules sets rules for the tables' regions.
+func (rc *Client) SetupPlacementRules(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start setting placement rules")
+	rule, err := rc.toolClient.GetPlacementRule(ctx, "pd", "default")
+	if err != nil {
+		return err
+	}
+	rule.Index = 100
+	rule.Override = true
+	rule.LabelConstraints = append(rule.LabelConstraints, placement.LabelConstraint{
+		Key:    restoreLabelKey,
+		Op:     "in",
+		Values: []string{restoreLabelValue},
+	})
+	for _, t := range tables {
+		rule.ID = rc.getRuleID(t.ID)
+		rule.StartKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID)))
+		rule.EndKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1)))
+		err = rc.toolClient.SetPlacementRule(ctx, rule)
+		if err != nil {
+			return err
+		}
+	}
+	log.Info("finish setting placement rules")
+	return nil
+}
+
+// WaitPlacementSchedule waits for PD to move the tables' regions to the restore stores.
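+// It polls checkRegions every ten seconds and returns once every region of
+// every table has all its peers on the restore stores, or when ctx is done.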
+func (rc *Client) WaitPlacementSchedule(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start waiting placement schedule")
+	ticker := time.NewTicker(time.Second * 10)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			ok, progress, err := rc.checkRegions(ctx, tables)
+			if err != nil {
+				return err
+			}
+			if ok {
+				log.Info("finish waiting placement schedule")
+				return nil
+			}
+			log.Info("placement schedule progress: " + progress)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
+
+func (rc *Client) checkRegions(ctx context.Context, tables []*model.TableInfo) (bool, string, error) {
+	for i, t := range tables {
+		start := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID))
+		end := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1))
+		ok, regionProgress, err := rc.checkRange(ctx, start, end)
+		if err != nil {
+			return false, "", err
+		}
+		if !ok {
+			return false, fmt.Sprintf("table %v/%v, %s", i, len(tables), regionProgress), nil
+		}
+	}
+	return true, "", nil
+}
+
+func (rc *Client) checkRange(ctx context.Context, start, end []byte) (bool, string, error) {
+	regions, err := rc.toolClient.ScanRegions(ctx, start, end, -1)
+	if err != nil {
+		return false, "", err
+	}
+	for i, r := range regions {
+	NEXT_PEER:
+		for _, p := range r.Region.GetPeers() {
+			for _, storeID := range rc.restoreStores {
+				if p.GetStoreId() == storeID {
+					continue NEXT_PEER
+				}
+			}
+			return false, fmt.Sprintf("region %v/%v", i, len(regions)), nil
+		}
+	}
+	return true, "", nil
+}
+
+// ResetPlacementRules removes placement rules for tables.
+func (rc *Client) ResetPlacementRules(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start resetting placement rules")
+	var failedTables []int64
+	for _, t := range tables {
+		err := rc.toolClient.DeletePlacementRule(ctx, "pd", rc.getRuleID(t.ID))
+		if err != nil {
+			log.Info("failed to delete placement rule for table", zap.Int64("table-id", t.ID))
+			failedTables = append(failedTables, t.ID)
+		}
+	}
+	if len(failedTables) > 0 {
+		return errors.Errorf("failed to delete placement rules for tables %v", failedTables)
+	}
+	return nil
+}
+
+func (rc *Client) getRuleID(tableID int64) string {
+	return "restore-t" + strconv.FormatInt(tableID, 10)
+}
+
 // IsIncremental returns whether this backup is incremental
 func (rc *Client) IsIncremental() bool {
 	return !(rc.backupMeta.StartVersion == rc.backupMeta.EndVersion ||
 		rc.backupMeta.StartVersion == 0)
 }
+
+// EnableSkipCreateSQL makes the client skip creating schemas and tables during restore
+func (rc *Client) EnableSkipCreateSQL() {
+	rc.noSchema = true
+}
+
+// IsSkipCreateSQL returns whether the client skips creating schemas and tables during restore
+func (rc *Client) IsSkipCreateSQL() bool {
+	return rc.noSchema
+}
diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go
index 5007f1281..13b5caa0a 100644
--- a/pkg/restore/client_test.go
+++ b/pkg/restore/client_test.go
@@ -1,3 +1,5 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+ package restore import ( @@ -12,18 +14,20 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreClientSuite{}) type testRestoreClientSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreClientSuite) SetUpTest(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -36,7 +40,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() @@ -52,7 +56,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { for i := len(tables) - 1; i >= 0; i-- { tables[i] = &utils.Table{ Db: dbSchema, - Schema: &model.TableInfo{ + Info: &model.TableInfo{ ID: int64(i), Name: model.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ @@ -68,6 +72,10 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { } rules, newTables, err := client.CreateTables(s.mock.Domain, tables, 0) c.Assert(err, IsNil) + // make sure tables and newTables have same order + for i, t := range tables { + c.Assert(newTables[i].Name, Equals, t.Info.Name) + } for _, nt := range newTables { c.Assert(nt.Name.String(), Matches, "test[0-3]") } @@ -93,7 +101,7 @@ func (s *testRestoreClientSuite) TestIsOnline(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() diff --git a/pkg/restore/db.go b/pkg/restore/db.go index b114b7629..6197ff7a2 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -1,35 +1,39 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( - "bytes" "context" "fmt" - "strings" + "sort" "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/session" "go.uber.org/zap" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/utils" ) // DB is a TiDB instance, not thread-safe. type DB struct { - se session.Session + se glue.Session } // NewDB returns a new DB -func NewDB(store kv.Storage) (*DB, error) { - se, err := session.CreateSession(store) +func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { + se, err := g.CreateSession(store) if err != nil { return nil, errors.Trace(err) } + // The session may be nil in raw kv mode + if se == nil { + return nil, nil + } // Set SQL mode to None for avoiding SQL compatibility problem - _, err = se.Execute(context.Background(), "set @@sql_mode=''") + err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { return nil, errors.Trace(err) } @@ -38,72 +42,98 @@ func NewDB(store kv.Storage) (*DB, error) { }, nil } +// ExecDDL executes the query of a ddl job. 
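+// If the job carries a TableInfo, the session first switches to the job's
+// schema so the replayed query resolves against the right database.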
+func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { + var err error + if ddlJob.BinlogInfo.TableInfo != nil { + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(ddlJob.SchemaName)) + err = db.se.Execute(ctx, switchDbSQL) + if err != nil { + log.Error("switch db failed", + zap.String("query", switchDbSQL), + zap.String("db", ddlJob.SchemaName), + zap.Error(err)) + return errors.Trace(err) + } + } + err = db.se.Execute(ctx, ddlJob.Query) + if err != nil { + log.Error("execute ddl query failed", + zap.String("query", ddlJob.Query), + zap.String("db", ddlJob.SchemaName), + zap.Int64("historySchemaVersion", ddlJob.BinlogInfo.SchemaVersion), + zap.Error(err)) + } + return errors.Trace(err) +} + // CreateDatabase executes a CREATE DATABASE SQL. func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { - var buf bytes.Buffer - err := executor.ConstructResultOfShowCreateDatabase(db.se, schema, true, &buf) + err := db.se.CreateDatabase(ctx, schema) if err != nil { - log.Error("build create database SQL failed", zap.Stringer("db", schema.Name), zap.Error(err)) - return errors.Trace(err) + log.Error("create database failed", zap.Stringer("db", schema.Name), zap.Error(err)) } - createSQL := buf.String() - _, err = db.se.Execute(ctx, createSQL) - if err != nil { - log.Error("create database failed", zap.String("SQL", createSQL), zap.Error(err)) - return errors.Trace(err) - } - return nil + return errors.Trace(err) } // CreateTable executes a CREATE TABLE SQL. func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - var buf bytes.Buffer - schema := table.Schema - err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf) + err := db.se.CreateTable(ctx, table.Db.Name, table.Info) if err != nil { - log.Error( - "build create table SQL failed", + log.Error("create table failed", zap.Stringer("db", table.Db.Name), - zap.Stringer("table", schema.Name), + zap.Stringer("table", table.Info.Name), zap.Error(err)) return errors.Trace(err) } - switchDbSQL := fmt.Sprintf("use %s;", table.Db.Name) - _, err = db.se.Execute(ctx, switchDbSQL) + alterAutoIncIDSQL := fmt.Sprintf( + "alter table %s.%s auto_increment = %d", + utils.EncloseName(table.Db.Name.O), + utils.EncloseName(table.Info.Name.O), + table.Info.AutoIncID) + err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { - log.Error("switch db failed", - zap.String("SQL", switchDbSQL), + log.Error("alter AutoIncID failed", + zap.String("query", alterAutoIncIDSQL), zap.Stringer("db", table.Db.Name), + zap.Stringer("table", table.Info.Name), zap.Error(err)) - return errors.Trace(err) } - createSQL := buf.String() - // Insert `IF NOT EXISTS` statement to skip the created tables - words := strings.SplitN(createSQL, " ", 3) - if len(words) > 2 && strings.ToUpper(words[0]) == "CREATE" && strings.ToUpper(words[1]) == "TABLE" { - createSQL = "CREATE TABLE IF NOT EXISTS " + words[2] - } - _, err = db.se.Execute(ctx, createSQL) + + return errors.Trace(err) +} + +// AlterTiflashReplica alters the replica count of tiflash +func (db *DB) AlterTiflashReplica(ctx context.Context, table *utils.Table, count int) error { + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err := db.se.Execute(ctx, switchDbSQL) if err != nil { - log.Error("create table failed", - zap.String("SQL", createSQL), + log.Error("switch db failed", + zap.String("SQL", switchDbSQL), zap.Stringer("db", table.Db.Name), - zap.Stringer("table", table.Schema.Name), 
 			zap.Error(err))
 		return errors.Trace(err)
 	}
-	alterAutoIncIDSQL := fmt.Sprintf(
-		"alter table %s auto_increment = %d",
-		escapeTableName(schema.Name),
-		schema.AutoIncID)
-	_, err = db.se.Execute(ctx, alterAutoIncIDSQL)
+	alterTiFlashSQL := fmt.Sprintf(
+		"alter table %s set tiflash replica %d",
+		utils.EncloseName(table.Info.Name.O),
+		count,
+	)
+	err = db.se.Execute(ctx, alterTiFlashSQL)
 	if err != nil {
-		log.Error("alter AutoIncID failed",
-			zap.String("SQL", alterAutoIncIDSQL),
+		log.Error("alter tiflash replica failed",
+			zap.String("query", alterTiFlashSQL),
 			zap.Stringer("db", table.Db.Name),
-			zap.Stringer("table", table.Schema.Name),
+			zap.Stringer("table", table.Info.Name),
 			zap.Error(err))
-		return errors.Trace(err)
+		return err
+	}
+	if table.TiFlashReplicas > 0 {
+		log.Warn("alter tiflash replica done",
+			zap.Stringer("db", table.Db.Name),
+			zap.Stringer("table", table.Info.Name),
+			zap.Int("originalReplicaCount", table.TiFlashReplicas),
+			zap.Int("replicaCount", count))
 	}
 	return nil
 }
@@ -112,3 +142,71 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 func (db *DB) Close() {
 	db.se.Close()
 }
+
+// FilterDDLJobs filters the DDL jobs that affect the given tables and databases
+func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*model.Job) {
+	// Sort the ddl jobs by schema version in descending order.
+	sort.Slice(allDDLJobs, func(i, j int) bool {
+		return allDDLJobs[i].BinlogInfo.SchemaVersion > allDDLJobs[j].BinlogInfo.SchemaVersion
+	})
+	dbs := getDatabases(tables)
+	for _, db := range dbs {
+		// These maps are used to handle some corner cases.
+		// e.g. let "t=2" denote that the id of database "t" is 2. If the ddl execution sequence is:
+		// rename "a" to "b"(a=1) -> drop "b"(b=1) -> create "b"(b=2) -> rename "b" to "a"(a=2)
+		// then we cannot find the "create" DDL by name and id directly.
+		// To cover this case, we must find all names and ids the database/table ever had.
+		dbIDs := make(map[int64]bool)
+		dbIDs[db.ID] = true
+		dbNames := make(map[string]bool)
+		dbNames[db.Name.String()] = true
+		for _, job := range allDDLJobs {
+			if job.BinlogInfo.DBInfo != nil {
+				if dbIDs[job.SchemaID] || dbNames[job.BinlogInfo.DBInfo.Name.String()] {
+					ddlJobs = append(ddlJobs, job)
+					// For the jobs executed with the old id, like step 2 in the example above.
+					dbIDs[job.SchemaID] = true
+					// For the jobs executed after rename, like step 3 in the example above.
+ dbNames[job.BinlogInfo.DBInfo.Name.String()] = true + } + } + } + } + + type namePair struct { + db string + table string + } + + for _, table := range tables { + tableIDs := make(map[int64]bool) + tableIDs[table.Info.ID] = true + tableNames := make(map[namePair]bool) + name := namePair{table.Db.Name.String(), table.Info.Name.String()} + tableNames[name] = true + for _, job := range allDDLJobs { + if job.BinlogInfo.TableInfo != nil { + name := namePair{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()} + if tableIDs[job.TableID] || tableNames[name] { + ddlJobs = append(ddlJobs, job) + tableIDs[job.TableID] = true + // For truncate table, the id may be changed + tableIDs[job.BinlogInfo.TableInfo.ID] = true + tableNames[name] = true + } + } + } + } + return ddlJobs +} + +func getDatabases(tables []*utils.Table) (dbs []*model.DBInfo) { + dbIDs := make(map[int64]bool) + for _, table := range tables { + if !dbIDs[table.Db.ID] { + dbs = append(dbs, table.Db) + dbIDs[table.Db.ID] = true + } + } + return +} diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 98341f510..3f77a53dd 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( @@ -12,32 +14,34 @@ import ( "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreSchemaSuite{}) type testRestoreSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) + c.Assert(s.mock.Start(), IsNil) } func TestT(t *testing.T) { TestingT(t) } func (s *testRestoreSchemaSuite) TearDownSuite(c *C) { + s.mock.Stop() testleak.AfterTest(c)() } func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { - c.Assert(s.mock.Start(), IsNil) - defer s.mock.Stop() - tk := testkit.NewTestKit(c, s.mock.Storage) tk.MustExec("use test") tk.MustExec("set @@sql_mode=''") @@ -60,17 +64,17 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { tableInfo, err := info.TableByName(model.NewCIStr("test"), model.NewCIStr("\"t\"")) c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) table := utils.Table{ - Schema: tableInfo.Meta(), - Db: dbInfo, + Info: tableInfo.Meta(), + Db: dbInfo, } // Get the next AutoIncID idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType) - globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID) + globalAutoID, err := idAlloc.NextGlobalAutoID(table.Info.ID) c.Assert(err, IsNil, Commentf("Error allocate next auto id")) c.Assert(autoIncID, Equals, uint64(globalAutoID)) // Alter AutoIncID to the next AutoIncID + 100 - table.Schema.AutoIncID = globalAutoID + 100 - db, err := NewDB(s.mock.Storage) + table.Info.AutoIncID = globalAutoID + 100 + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil, Commentf("Error create DB")) tk.MustExec("drop database if exists test;") // Test empty collate value @@ -92,3 +96,39 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err)) c.Assert(autoIncID, Equals, uint64(globalAutoID+100)) } + +func (s *testRestoreSchemaSuite) TestFilterDDLJobs(c *C) { + tk := testkit.NewTestKit(c, 
s.mock.Storage) + tk.MustExec("CREATE DATABASE IF NOT EXISTS test_db;") + tk.MustExec("CREATE TABLE IF NOT EXISTS test_db.test_table (c1 INT);") + lastTs, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get last ts: %s", err)) + tk.MustExec("RENAME TABLE test_db.test_table to test_db.test_table1;") + tk.MustExec("DROP TABLE test_db.test_table1;") + tk.MustExec("DROP DATABASE test_db;") + tk.MustExec("CREATE DATABASE test_db;") + tk.MustExec("USE test_db;") + tk.MustExec("CREATE TABLE test_table1 (c2 CHAR(255));") + tk.MustExec("RENAME TABLE test_table1 to test_table;") + tk.MustExec("TRUNCATE TABLE test_table;") + + ts, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get ts: %s", err)) + allDDLJobs, err := backup.GetBackupDDLJobs(s.mock.Domain, lastTs, ts) + c.Assert(err, IsNil, Commentf("Error get ddl jobs: %s", err)) + infoSchema, err := s.mock.Domain.GetSnapshotInfoSchema(ts) + c.Assert(err, IsNil, Commentf("Error get snapshot info schema: %s", err)) + dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + c.Assert(ok, IsTrue, Commentf("DB info not exist")) + tableInfo, err := infoSchema.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_table")) + c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) + tables := []*utils.Table{{ + Db: dbInfo, + Info: tableInfo.Meta(), + }} + ddlJobs := FilterDDLJobs(allDDLJobs, tables) + for _, job := range ddlJobs { + c.Logf("get ddl job: %s", job.Query) + } + c.Assert(len(ddlJobs), Equals, 7) +} diff --git a/pkg/restore/import.go b/pkg/restore/import.go index de35ecaea..58a168f06 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -1,8 +1,11 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package restore import ( + "bytes" "context" - "strings" + "crypto/tls" "sync" "time" @@ -12,15 +15,17 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" - "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/pd/v3/pkg/codec" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) const importScanRegionTime = 10 * time.Second +const scanRegionPaginationLimit = int(128) // ImporterClient is used to import a file to TiKV type ImporterClient interface { @@ -47,13 +52,15 @@ type importClient struct { mu sync.Mutex metaClient SplitClient clients map[uint64]import_sstpb.ImportSSTClient + tlsConf *tls.Config } // NewImportClient returns a new ImporterClient -func NewImportClient(metaClient SplitClient) ImporterClient { +func NewImportClient(metaClient SplitClient, tlsConf *tls.Config) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), + tlsConf: tlsConf, } } @@ -107,7 +114,11 @@ func (ic *importClient) getImportClient( if err != nil { return nil, err } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + opt := grpc.WithInsecure() + if ic.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(ic.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) if err != nil { return nil, err } @@ -123,6 +134,10 @@ type FileImporter struct { backend *backup.StorageBackend rateLimit uint64 + isRawKvMode bool + rawStartKey []byte + rawEndKey []byte + ctx context.Context cancel context.CancelFunc } @@ -133,6 +148,7 @@ func NewFileImporter( metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, + isRawKvMode bool, rateLimit uint64, ) FileImporter { ctx, cancel := context.WithCancel(ctx) @@ -142,16 +158,38 @@ func NewFileImporter( ctx: ctx, cancel: cancel, importClient: importClient, + isRawKvMode: isRawKvMode, rateLimit: rateLimit, } } +// SetRawRange sets the range to be restored in raw kv mode. +func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error { + if !importer.isRawKvMode { + return errors.New("file importer is not in raw kv mode") + } + importer.rawStartKey = startKey + importer.rawEndKey = endKey + return nil +} + // Import tries to import a file. // All rules must contain encoded keys. 
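+// When rejectStoreMap is not empty, the importer first waits for each
+// scanned region to move its peers off the rejected (TiFlash) stores
+// before downloading and ingesting into it.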
-func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { +func (importer *FileImporter) Import( + file *backup.File, + rejectStoreMap map[uint64]bool, + rewriteRules *RewriteRules, +) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions - startKey, endKey, err := rewriteFileKeys(file, rewriteRules) + var startKey, endKey []byte + var err error + if importer.isRawKvMode { + startKey = file.StartKey + endKey = file.EndKey + } else { + startKey, endKey, err = rewriteFileKeys(file, rewriteRules) + } if err != nil { return err } @@ -159,27 +197,52 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Stringer("file", file), zap.Binary("startKey", startKey), zap.Binary("endKey", endKey)) + + needReject := len(rejectStoreMap) > 0 + err = utils.WithRetry(importer.ctx, func() error { ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) - if err1 != nil { - return errors.Trace(err1) + regionInfos, errScanRegion := paginateScanRegion( + ctx, importer.metaClient, startKey, endKey, scanRegionPaginationLimit) + if errScanRegion != nil { + return errors.Trace(errScanRegion) } + + if needReject { + // TODO remove when TiFlash support restore + startTime := time.Now() + log.Info("start to wait for removing rejected stores", zap.Reflect("rejectStores", rejectStoreMap)) + for _, region := range regionInfos { + if !waitForRemoveRejectStores(ctx, importer.metaClient, region, rejectStoreMap) { + log.Error("waiting for removing rejected stores failed", + zap.Stringer("region", region.Region)) + return errors.New("waiting for removing rejected stores failed") + } + } + log.Info("waiting for removing rejected stores done", + zap.Int("regions", len(regionInfos)), zap.Duration("take", time.Since(startTime))) + needReject = false + } + log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { info := regionInfo // Try to download file. 
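+			// Raw KV mode takes the downloadRawKVSST path below, which needs no key rewrite.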
var downloadMeta *import_sstpb.SSTMeta - err1 = utils.WithRetry(importer.ctx, func() error { + errDownload := utils.WithRetry(importer.ctx, func() error { var e error - downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + if importer.isRawKvMode { + downloadMeta, e = importer.downloadRawKVSST(info, file) + } else { + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + } return e }, newDownloadSSTBackoffer()) - if err1 != nil { - if err1 == errRewriteRuleNotFound || err1 == errRangeIsEmpty { + if errDownload != nil { + if errDownload == errRewriteRuleNotFound || errDownload == errRangeIsEmpty { // Skip this region continue } @@ -188,35 +251,70 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Stringer("region", info.Region), zap.Binary("startKey", startKey), zap.Binary("endKey", endKey), - zap.Error(err1)) - return err1 + zap.Error(errDownload)) + return errDownload } - err1 = importer.ingestSST(downloadMeta, info) - // If error is `NotLeader`, update the region info and retry - for errors.Cause(err1) == errNotLeader { - log.Debug("ingest sst returns not leader error, retry it", - zap.Stringer("region", info.Region)) - var newInfo *RegionInfo - newInfo, err1 = importer.metaClient.GetRegion(importer.ctx, info.Region.GetStartKey()) - if err1 != nil { - break + + ingestResp, errIngest := importer.ingestSST(downloadMeta, info) + ingestRetry: + for errIngest == nil { + errPb := ingestResp.GetError() + if errPb == nil { + // Ingest success + break ingestRetry } - if !checkRegionEpoch(newInfo, info) { - err1 = errEpochNotMatch - break + switch { + case errPb.NotLeader != nil: + // If error is `NotLeader`, update the region info and retry + var newInfo *RegionInfo + if newLeader := errPb.GetNotLeader().GetLeader(); newLeader != nil { + newInfo = &RegionInfo{ + Leader: newLeader, + Region: info.Region, + } + } else { + // Slow path, get region from PD + newInfo, errIngest = importer.metaClient.GetRegion( + importer.ctx, info.Region.GetStartKey()) + if errIngest != nil { + break ingestRetry + } + } + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region), + zap.Stringer("newLeader", newInfo.Leader)) + + if !checkRegionEpoch(newInfo, info) { + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + } + ingestResp, errIngest = importer.ingestSST(downloadMeta, newInfo) + case errPb.EpochNotMatch != nil: + // TODO handle epoch not match error + // 1. retry download if needed + // 2. retry ingest + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + case errPb.KeyNotInRegion != nil: + errIngest = errors.AddStack(errKeyNotInRegion) + break ingestRetry + default: + // Other errors like `ServerIsBusy`, `RegionNotFound`, etc. 
should be retryable + errIngest = errors.Annotatef(errIngestFailed, "ingest error %s", errPb) + break ingestRetry } - err1 = importer.ingestSST(downloadMeta, newInfo) } - if err1 != nil { + + if errIngest != nil { log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err1)) - return err1 + zap.Error(errIngest)) + return errIngest } - summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) - summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) + summary.CollectSuccessUnit(summary.TotalKV, 1, file.TotalKvs) + summary.CollectSuccessUnit(summary.TotalBytes, 1, file.TotalBytes) } return nil }, newImportSSTBackoffer()) @@ -241,7 +339,7 @@ func (importer *FileImporter) downloadSST( return nil, errors.Trace(err) } // Assume one region reflects to one rewrite rule - _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey(), []byte{}) + _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey()) if err != nil { return nil, err } @@ -254,6 +352,7 @@ func (importer *FileImporter) downloadSST( NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -268,7 +367,10 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, extractDownloadSSTError(err) + return nil, errors.Annotatef(errGrpc, "%s", err) + } + if resp.GetError() != nil { + return nil, errors.Annotate(errDownloadFailed, resp.GetError().GetMessage()) } if resp.GetIsEmpty() { return nil, errors.Trace(errRangeIsEmpty) @@ -279,10 +381,62 @@ func (importer *FileImporter) downloadSST( return &sstMeta, nil } +func (importer *FileImporter) downloadRawKVSST( + regionInfo *RegionInfo, + file *backup.File, +) (*import_sstpb.SSTMeta, error) { + id, err := uuid.New().MarshalBinary() + if err != nil { + return nil, errors.Trace(err) + } + // Empty rule + var rule import_sstpb.RewriteRule + sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + + // Cut the SST file's range to fit in the restoring range. + if bytes.Compare(importer.rawStartKey, sstMeta.Range.GetStart()) > 0 { + sstMeta.Range.Start = importer.rawStartKey + } + // TODO: importer.RawEndKey is exclusive but sstMeta.Range.End is inclusive. How to exclude importer.RawEndKey? 
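+	// Until that is resolved, the inclusive clamp below may cover at most one
+	// extra key: the key equal to rawEndKey, if present in the SST.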
+ if len(importer.rawEndKey) > 0 && bytes.Compare(importer.rawEndKey, sstMeta.Range.GetEnd()) < 0 { + sstMeta.Range.End = importer.rawEndKey + } + if bytes.Compare(sstMeta.Range.GetStart(), sstMeta.Range.GetEnd()) > 0 { + return nil, errors.Trace(errRangeIsEmpty) + } + + req := &import_sstpb.DownloadRequest{ + Sst: sstMeta, + StorageBackend: importer.backend, + Name: file.GetName(), + RewriteRule: rule, + } + log.Debug("download SST", + zap.Stringer("sstMeta", &sstMeta), + zap.Stringer("region", regionInfo.Region), + ) + var resp *import_sstpb.DownloadResponse + for _, peer := range regionInfo.Region.GetPeers() { + resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) + if err != nil { + return nil, errors.Annotatef(errGrpc, "%s", err) + } + if resp.GetError() != nil { + return nil, errors.Annotate(errDownloadFailed, resp.GetError().GetMessage()) + } + if resp.GetIsEmpty() { + return nil, errors.Trace(errRangeIsEmpty) + } + } + sstMeta.Range.Start = resp.Range.GetStart() + sstMeta.Range.End = resp.Range.GetEnd() + return &sstMeta, nil +} + func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, regionInfo *RegionInfo, -) error { +) (*import_sstpb.IngestResponse, error) { leader := regionInfo.Leader if leader == nil { leader = regionInfo.Region.GetPeers()[0] @@ -296,26 +450,12 @@ func (importer *FileImporter) ingestSST( Context: reqCtx, Sst: sstMeta, } - log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) + log.Debug("ingest SST", zap.Stringer("sstMeta", sstMeta), zap.Reflect("leader", leader)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - if strings.Contains(err.Error(), "RegionNotFound") { - return errors.Trace(errRegionNotFound) - } - return errors.Trace(err) - } - respErr := resp.GetError() - if respErr != nil { - log.Debug("ingest sst resp error", zap.Stringer("error", respErr)) - if respErr.GetKeyNotInRegion() != nil { - return errors.Trace(errKeyNotInRegion) - } - if respErr.GetNotLeader() != nil { - return errors.Trace(errNotLeader) - } - return errors.Wrap(errResp, respErr.String()) + return nil, errors.Trace(err) } - return nil + return resp, nil } func checkRegionEpoch(new, old *RegionInfo) bool { @@ -326,18 +466,3 @@ func checkRegionEpoch(new, old *RegionInfo) bool { } return false } - -func extractDownloadSSTError(e error) error { - err := errGrpc - switch { - case strings.Contains(e.Error(), "bad format"): - err = errBadFormat - case strings.Contains(e.Error(), "wrong prefix"): - err = errWrongKeyPrefix - case strings.Contains(e.Error(), "corrupted"): - err = errFileCorrupted - case strings.Contains(e.Error(), "Cannot read"): - err = errCannotRead - } - return errors.Trace(err) -} diff --git a/pkg/restore/range.go b/pkg/restore/range.go index f3914539e..0d5192ca9 100644 --- a/pkg/restore/range.go +++ b/pkg/restore/range.go @@ -1,45 +1,21 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( - "bytes" - "fmt" - - "github.com/google/btree" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/tidb/tablecodec" "go.uber.org/zap" -) - -// Range represents a range of keys. 
-type Range struct { - StartKey []byte - EndKey []byte -} - -// String formats a range to a string -func (r *Range) String() string { - return fmt.Sprintf("[%x %x]", r.StartKey, r.EndKey) -} -// Less compares a range with a btree.Item -func (r *Range) Less(than btree.Item) bool { - t := than.(*Range) - return len(r.EndKey) != 0 && bytes.Compare(r.EndKey, t.StartKey) <= 0 -} - -// contains returns if a key is included in the range. -func (r *Range) contains(key []byte) bool { - start, end := r.StartKey, r.EndKey - return bytes.Compare(key, start) >= 0 && - (len(end) == 0 || bytes.Compare(key, end) < 0) -} + "github.com/pingcap/br/pkg/rtree" +) // sortRanges checks if the range overlapped and sort them -func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) { - rangeTree := NewRangeTree() +func sortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range, error) { + rangeTree := rtree.NewRangeTree() for _, rg := range ranges { if rewriteRules != nil { startID := tablecodec.DecodeTableID(rg.StartKey) @@ -77,64 +53,10 @@ func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) { return nil, errors.Errorf("ranges overlapped: %s, %s", out, rg) } } - sortedRanges := make([]Range, 0, len(ranges)) - rangeTree.Ascend(func(rg *Range) bool { - if rg == nil { - return false - } - sortedRanges = append(sortedRanges, *rg) - return true - }) + sortedRanges := rangeTree.GetSortedRanges() return sortedRanges, nil } -// RangeTree stores the ranges in an orderly manner. -// All the ranges it stored do not overlap. -type RangeTree struct { - tree *btree.BTree -} - -// NewRangeTree returns a new RangeTree. -func NewRangeTree() *RangeTree { - return &RangeTree{tree: btree.New(32)} -} - -// Find returns nil or a range in the range tree -func (rt *RangeTree) Find(key []byte) *Range { - var ret *Range - r := &Range{ - StartKey: key, - } - rt.tree.DescendLessOrEqual(r, func(i btree.Item) bool { - ret = i.(*Range) - return false - }) - if ret == nil || !ret.contains(key) { - return nil - } - return ret -} - -// InsertRange inserts ranges into the range tree. -// it returns true if all ranges inserted successfully. -// it returns false if there are some overlapped ranges. -func (rt *RangeTree) InsertRange(rg Range) btree.Item { - return rt.tree.ReplaceOrInsert(&rg) -} - -// RangeIterator allows callers of Ascend to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend function will immediately return. -type RangeIterator func(rg *Range) bool - -// Ascend calls the iterator for every value in the tree within [first, last], -// until the iterator returns false. -func (rt *RangeTree) Ascend(iterator RangeIterator) { - rt.tree.Ascend(func(i btree.Item) bool { - return iterator(i.(*Range)) - }) -} - // RegionInfo includes a region and the leader of the region. type RegionInfo struct { Region *metapb.Region diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go index a9edc5b82..37561f6b4 100644 --- a/pkg/restore/range_test.go +++ b/pkg/restore/range_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( @@ -6,6 +8,8 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb/tablecodec" + + "github.com/pingcap/br/pkg/rtree" ) type testRangeSuite struct{} @@ -21,8 +25,8 @@ var RangeEquals Checker = &rangeEquals{ } func (checker *rangeEquals) Check(params []interface{}, names []string) (result bool, error string) { - obtained := params[0].([]Range) - expected := params[1].([]Range) + obtained := params[0].([]rtree.Range) + expected := params[1].([]rtree.Range) if len(obtained) != len(expected) { return false, "" } @@ -44,20 +48,20 @@ func (s *testRangeSuite) TestSortRange(c *C) { Table: make([]*import_sstpb.RewriteRule, 0), Data: dataRules, } - ranges1 := []Range{ - {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...)}, + ranges1 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...), Files: nil}, } rs1, err := sortRanges(ranges1, rewriteRules) c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) - c.Assert(rs1, RangeEquals, []Range{ - {append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...)}, + c.Assert(rs1, RangeEquals, []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...), Files: nil}, }) - ranges2 := []Range{ - {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...)}, + ranges2 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...), Files: nil}, } _, err = sortRanges(ranges2, rewriteRules) c.Assert(err, ErrorMatches, ".*table id does not match.*") @@ -66,10 +70,10 @@ func (s *testRangeSuite) TestSortRange(c *C) { rewriteRules1 := initRewriteRules() rs3, err := sortRanges(ranges3, rewriteRules1) c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) - c.Assert(rs3, RangeEquals, []Range{ - {[]byte("bbd"), []byte("bbf")}, - {[]byte("bbf"), []byte("bbj")}, - {[]byte("xxa"), []byte("xxe")}, - {[]byte("xxe"), []byte("xxz")}, + c.Assert(rs3, RangeEquals, []rtree.Range{ + {StartKey: []byte("bbd"), EndKey: []byte("bbf"), Files: nil}, + {StartKey: []byte("bbf"), EndKey: []byte("bbj"), Files: nil}, + {StartKey: []byte("xxa"), EndKey: []byte("xxe"), Files: nil}, + {StartKey: []byte("xxe"), EndKey: []byte("xxz"), Files: nil}, }) } diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 3248fdd0d..4138d0012 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -1,16 +1,22 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( "bytes" "context" + "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" ) // Constants for split retry machinery. 
@@ -26,8 +32,11 @@ const ( ScatterWaitMaxRetryTimes = 64 ScatterWaitInterval = 50 * time.Millisecond ScatterMaxWaitInterval = time.Second - ScatterWaitUpperInterval = 180 * time.Second + + RejectStoreCheckRetryTimes = 64 + RejectStoreCheckInterval = 100 * time.Millisecond + RejectStoreMaxCheckInterval = 2 * time.Second ) // RegionSplitter is a executor of region split by rules. @@ -52,7 +61,7 @@ type OnSplitFunc func(key [][]byte) // note: all ranges and rewrite rules must have raw key. func (rs *RegionSplitter) Split( ctx context.Context, - ranges []Range, + ranges []rtree.Range, rewriteRules *RewriteRules, onSplit OnSplitFunc, ) error { @@ -61,9 +70,9 @@ func (rs *RegionSplitter) Split( } startTime := time.Now() // Sort the range for getting the min and max key of the ranges - sortedRanges, err := sortRanges(ranges, rewriteRules) - if err != nil { - return errors.Trace(err) + sortedRanges, errSplit := sortRanges(ranges, rewriteRules) + if errSplit != nil { + return errors.Trace(errSplit) } minKey := codec.EncodeBytes([]byte{}, sortedRanges[0].StartKey) maxKey := codec.EncodeBytes([]byte{}, sortedRanges[len(sortedRanges)-1].EndKey) @@ -87,10 +96,9 @@ func (rs *RegionSplitter) Split( scatterRegions := make([]*RegionInfo, 0) SplitRegions: for i := 0; i < SplitRetryTimes; i++ { - var regions []*RegionInfo - regions, err = rs.client.ScanRegions(ctx, minKey, maxKey, 0) - if err != nil { - return errors.Trace(err) + regions, errScan := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit) + if errScan != nil { + return errors.Trace(errScan) } if len(regions) == 0 { log.Warn("cannot scan any region") @@ -103,27 +111,38 @@ SplitRegions: } for regionID, keys := range splitKeyMap { var newRegions []*RegionInfo - newRegions, err = rs.splitAndScatterRegions(ctx, regionMap[regionID], keys) - if err != nil { + region := regionMap[regionID] + newRegions, errSplit = rs.splitAndScatterRegions(ctx, region, keys) + if errSplit != nil { + if strings.Contains(errSplit.Error(), "no valid key") { + for _, key := range keys { + log.Error("no valid key", + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey), + zap.Binary("key", codec.EncodeBytes([]byte{}, key))) + } + return errors.Trace(errSplit) + } interval = 2 * interval if interval > SplitMaxRetryInterval { interval = SplitMaxRetryInterval } time.Sleep(interval) if i > 3 { - log.Warn("splitting regions failed, retry it", zap.Error(err), zap.ByteStrings("keys", keys)) + log.Warn("splitting regions failed, retry it", zap.Error(errSplit), zap.ByteStrings("keys", keys)) } continue SplitRegions } + log.Debug("split regions", zap.Stringer("region", region.Region), zap.ByteStrings("keys", keys)) scatterRegions = append(scatterRegions, newRegions...) 
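+			// Report the keys that were just split so the caller can advance its progress.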
onSplit(keys) } break } - if err != nil { - return errors.Trace(err) + if errSplit != nil { + return errors.Trace(errSplit) } - log.Info("splitting regions done, wait for scattering regions", + log.Info("start to wait for scattering regions", zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) startTime = time.Now() scatterCount := 0 @@ -240,7 +259,7 @@ func (rs *RegionSplitter) splitAndScatterRegions( // getSplitKeys checks if the regions should be split by the new prefix of the rewrites rule and the end key of // the ranges, groups the split keys by region id -func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionInfo) map[uint64][][]byte { +func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte { splitKeyMap := make(map[uint64][][]byte) checkKeys := make([][]byte, 0) for _, rule := range rewriteRules.Table { @@ -250,7 +269,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) } for _, rg := range ranges { - checkKeys = append(checkKeys, rg.EndKey) + checkKeys = append(checkKeys, truncateRowKey(rg.EndKey)) } for _, key := range checkKeys { if region := needSplit(key, regions); region != nil { @@ -259,7 +278,10 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI splitKeys = make([][]byte, 0, 1) } splitKeyMap[region.Region.GetId()] = append(splitKeys, key) - log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) + log.Debug("get key for split region", + zap.Binary("key", key), + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey)) } } return splitKeyMap @@ -285,6 +307,21 @@ func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { return nil } +var ( + tablePrefix = []byte{'t'} + idLen = 8 + recordPrefix = []byte("_r") +) + +func truncateRowKey(key []byte) []byte { + if bytes.HasPrefix(key, tablePrefix) && + len(key) > tablecodec.RecordRowKeyLen && + bytes.HasPrefix(key[len(tablePrefix)+idLen:], recordPrefix) { + return key[:tablecodec.RecordRowKeyLen] + } + return key +} + func beforeEnd(key []byte, end []byte) bool { return bytes.Compare(key, end) < 0 || len(end) == 0 } diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go index 8a618a191..a461e0576 100644 --- a/pkg/restore/split_client.go +++ b/pkg/restore/split_client.go @@ -1,8 +1,11 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( "bytes" "context" + "crypto/tls" "encoding/json" "fmt" "io/ioutil" @@ -17,9 +20,10 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/tikvpb" - pd "github.com/pingcap/pd/client" - "github.com/pingcap/pd/server/schedule/placement" + pd "github.com/pingcap/pd/v3/client" + "github.com/pingcap/pd/v3/server/schedule/placement" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) // SplitClient is an external client used by RegionSplitter. @@ -58,13 +62,15 @@ type SplitClient interface { type pdClient struct { mu sync.Mutex client pd.Client + tlsConf *tls.Config storeCache map[uint64]*metapb.Store } // NewSplitClient returns a client used by RegionSplitter. 
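+// A non-nil tlsConf is used when dialing each store in BatchSplitRegions.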
-func NewSplitClient(client pd.Client) SplitClient { +func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient { return &pdClient{ client: client, + tlsConf: tlsConf, storeCache: make(map[uint64]*metapb.Store), } } @@ -199,7 +205,11 @@ func (c *pdClient) BatchSplitRegions( if err != nil { return nil, err } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + opt := grpc.WithInsecure() + if c.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) if err != nil { return nil, err } diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 509c4cfa0..32ac3e14d 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( @@ -10,21 +12,30 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/pd/server/schedule/placement" + "github.com/pingcap/pd/v3/server/core" + "github.com/pingcap/pd/v3/server/schedule/placement" "github.com/pingcap/tidb/util/codec" + + "github.com/pingcap/br/pkg/rtree" ) type testClient struct { mu sync.RWMutex stores map[uint64]*metapb.Store regions map[uint64]*RegionInfo + regionsInfo *core.RegionsInfo // For now it's only used in ScanRegions nextRegionID uint64 } func newTestClient(stores map[uint64]*metapb.Store, regions map[uint64]*RegionInfo, nextRegionID uint64) *testClient { + regionsInfo := core.NewRegionsInfo() + for _, regionInfo := range regions { + regionsInfo.AddRegion(core.NewRegionInfo(regionInfo.Region, regionInfo.Leader)) + } return &testClient{ stores: stores, regions: regions, + regionsInfo: regionsInfo, nextRegionID: nextRegionID, } } @@ -142,16 +153,13 @@ func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.Ge } func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { - regions := make([]*RegionInfo, 0) - for _, region := range c.regions { - if limit > 0 && len(regions) >= limit { - break - } - if (len(region.Region.GetEndKey()) != 0 && bytes.Compare(region.Region.GetEndKey(), key) <= 0) || - bytes.Compare(region.Region.GetStartKey(), endKey) > 0 { - continue - } - regions = append(regions, region) + infos := c.regionsInfo.ScanRange(key, endKey, limit) + regions := make([]*RegionInfo, 0, len(infos)) + for _, info := range infos { + regions = append(regions, &RegionInfo{ + Region: info.GetMeta(), + Leader: info.GetLeader(), + }) } return regions, nil } @@ -234,21 +242,21 @@ func initTestClient() *testClient { } // range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) -func initRanges() []Range { - var ranges [4]Range - ranges[0] = Range{ +func initRanges() []rtree.Range { + var ranges [4]rtree.Range + ranges[0] = rtree.Range{ StartKey: []byte("aaa"), EndKey: []byte("aae"), } - ranges[1] = Range{ + ranges[1] = rtree.Range{ StartKey: []byte("aae"), EndKey: []byte("aaz"), } - ranges[2] = Range{ + ranges[2] = rtree.Range{ StartKey: []byte("ccd"), EndKey: []byte("ccf"), } - ranges[3] = Range{ + ranges[3] = rtree.Range{ StartKey: []byte("ccf"), EndKey: []byte("ccj"), } @@ -280,7 +288,7 @@ func validateRegions(regions map[uint64]*RegionInfo) bool { return false } FindRegion: - for i := 1; i < 12; i++ { + for i := 1; i < len(keys); i++ { for _, region := range regions { startKey := []byte(keys[i-1]) if len(startKey) != 0 { @@ -299,3 +307,26 @@ 
FindRegion:
 	}
 	return true
 }
+
+func (s *testRestoreUtilSuite) TestNeedSplit(c *C) {
+	regions := []*RegionInfo{
+		{
+			Region: &metapb.Region{
+				StartKey: codec.EncodeBytes([]byte{}, []byte("b")),
+				EndKey:   codec.EncodeBytes([]byte{}, []byte("d")),
+			},
+		},
+	}
+	// Out of region
+	c.Assert(needSplit([]byte("a"), regions), IsNil)
+	// Region start key
+	c.Assert(needSplit([]byte("b"), regions), IsNil)
+	// In region
+	region := needSplit([]byte("c"), regions)
+	c.Assert(bytes.Compare(region.Region.GetStartKey(), codec.EncodeBytes([]byte{}, []byte("b"))), Equals, 0)
+	c.Assert(bytes.Compare(region.Region.GetEndKey(), codec.EncodeBytes([]byte{}, []byte("d"))), Equals, 0)
+	// Region end key
+	c.Assert(needSplit([]byte("d"), regions), IsNil)
+	// Out of region
+	c.Assert(needSplit([]byte("e"), regions), IsNil)
+}
diff --git a/pkg/restore/util.go b/pkg/restore/util.go
index 63ee92969..2652b1e7b 100644
--- a/pkg/restore/util.go
+++ b/pkg/restore/util.go
@@ -1,8 +1,11 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package restore
 
 import (
 	"bytes"
 	"context"
+	"encoding/hex"
 	"strings"
 	"time"
@@ -13,49 +16,17 @@ import (
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/log"
 	"github.com/pingcap/parser/model"
-	"github.com/pingcap/tidb/meta/autoid"
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/util/codec"
 	"go.uber.org/zap"
 
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/rtree"
 	"github.com/pingcap/br/pkg/summary"
 )
 
 var recordPrefixSep = []byte("_r")
 
-// idAllocator always returns a specified ID
-type idAllocator struct {
-	id int64
-}
-
-func newIDAllocator(id int64) *idAllocator {
-	return &idAllocator{id: id}
-}
-
-func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64) (min int64, max int64, err error) {
-	return alloc.id, alloc.id, nil
-}
-
-func (alloc *idAllocator) Rebase(tableID, newBase int64, allocIDs bool) error {
-	return nil
-}
-
-func (alloc *idAllocator) Base() int64 {
-	return alloc.id
-}
-
-func (alloc *idAllocator) End() int64 {
-	return alloc.id
-}
-
-func (alloc *idAllocator) NextGlobalAutoID(tableID int64) (int64, error) {
-	return alloc.id, nil
-}
-
-func (alloc *idAllocator) GetType() autoid.AllocatorType {
-	return autoid.RowIDAllocType
-}
-
 // GetRewriteRules returns the rewrite rule of the new table and the old table.
 func GetRewriteRules(
 	newTable *model.TableInfo,
@@ -153,8 +124,8 @@ func getSSTMetaFromFile(
 func ValidateFileRanges(
 	files []*backup.File,
 	rewriteRules *RewriteRules,
-) ([]Range, error) {
-	ranges := make([]Range, 0, len(files))
+) ([]rtree.Range, error) {
+	ranges := make([]rtree.Range, 0, len(files))
 	fileAppended := make(map[string]bool)
 
 	for _, file := range files {
@@ -173,7 +144,7 @@ func ValidateFileRanges(
 				zap.Stringer("file", file))
 			return nil, errors.New("table ids dont match")
 		}
-		ranges = append(ranges, Range{
+		ranges = append(ranges, rtree.Range{
 			StartKey: file.GetStartKey(),
 			EndKey:   file.GetEndKey(),
 		})
@@ -183,6 +154,39 @@ func ValidateFileRanges(
 	return ranges, nil
 }
 
+// AttachFilesToRanges attaches backup files to their corresponding key ranges.
+// It panics if the ranges overlap or a file has no matching range.
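+// Files are matched to their ranges by start/end key via a range-tree lookup.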
+func AttachFilesToRanges(
+	files []*backup.File,
+	ranges []rtree.Range,
+) []rtree.Range {
+	rangeTree := rtree.NewRangeTree()
+	for _, rg := range ranges {
+		rangeTree.Update(rg)
+	}
+	for _, f := range files {
+		rg := rangeTree.Find(&rtree.Range{
+			StartKey: f.GetStartKey(),
+			EndKey:   f.GetEndKey(),
+		})
+		if rg == nil {
+			log.Fatal("range not found",
+				zap.Binary("startKey", f.GetStartKey()),
+				zap.Binary("endKey", f.GetEndKey()))
+		}
+		file := *f
+		rg.Files = append(rg.Files, &file)
+	}
+	if rangeTree.Len() != len(ranges) {
+		log.Fatal("ranges overlapped",
+			zap.Int("ranges length", len(ranges)),
+			zap.Int("tree length", rangeTree.Len()))
+	}
+	sortedRanges := rangeTree.GetSortedRanges()
+	return sortedRanges
+}
+
 // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file
 func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error {
 	// Check if the start key has a matched rewrite key
@@ -266,6 +270,9 @@ func matchNewPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.Rewrit
 }
 
 func truncateTS(key []byte) []byte {
+	if len(key) == 0 {
+		return nil
+	}
 	return key[:len(key)-8]
 }
 
@@ -275,19 +282,20 @@ func truncateTS(key []byte) []byte {
 func SplitRanges(
 	ctx context.Context,
 	client *Client,
-	ranges []Range,
+	ranges []rtree.Range,
 	rewriteRules *RewriteRules,
-	updateCh chan<- struct{},
+	updateCh glue.Progress,
 ) error {
 	start := time.Now()
 	defer func() {
 		elapsed := time.Since(start)
 		summary.CollectDuration("split region", elapsed)
 	}()
-	splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient()))
+	splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig()))
 	return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) {
 		for range keys {
-			updateCh <- struct{}{}
+			updateCh.Inc()
 		}
 	})
 }
@@ -299,6 +307,10 @@ func rewriteFileKeys(file *backup.File, rewriteRules *RewriteRules) (startKey, e
 	if startID == endID {
 		startKey, rule = rewriteRawKey(file.GetStartKey(), rewriteRules)
 		if rewriteRules != nil && rule == nil {
+			log.Error("cannot find rewrite rule",
+				zap.Binary("startKey", file.GetStartKey()),
+				zap.Reflect("rewrite table", rewriteRules.Table),
+				zap.Reflect("rewrite data", rewriteRules.Data))
 			err = errors.New("cannot find rewrite rule for start key")
 			return
 		}
@@ -325,11 +337,90 @@ func encodeKeyPrefix(key []byte) []byte {
 	return append(encodedPrefix[:len(encodedPrefix)-9], key[len(key)-ungroupedLen:]...)
 }
 
-// escape the identifier for pretty-printing.
-// For instance, the identifier "foo `bar`" will become "`foo ``bar```".
-// The sqlMode controls whether to escape with backquotes (`) or double quotes
-// (`"`) depending on whether mysql.ModeANSIQuotes is enabled.
-func escapeTableName(cis model.CIStr) string {
-	quote := "`"
-	return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote
+// paginateScanRegion scans regions with a pagination limit and returns all
+// regions at once. Paginating keeps each response within the max gRPC
+// message size.
+func paginateScanRegion(
+	ctx context.Context, client SplitClient, startKey, endKey []byte, limit int,
+) ([]*RegionInfo, error) {
+	if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 {
+		return nil, errors.Errorf("startKey >= endKey, startKey %s, endKey %s",
+			hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+	}
+
+	regions := []*RegionInfo{}
+	for {
+		batch, err := client.ScanRegions(ctx, startKey, endKey, limit)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		regions = append(regions, batch...)
+		if len(batch) < limit {
+			// No more regions to scan.
+			break
+		}
+		startKey = batch[len(batch)-1].Region.GetEndKey()
+		if len(startKey) == 0 ||
+			(len(endKey) > 0 && bytes.Compare(startKey, endKey) >= 0) {
+			// The whole key space has been scanned.
+			break
+		}
+	}
+	return regions, nil
+}
+
+func hasRejectStorePeer(
+	ctx context.Context,
+	client SplitClient,
+	regionID uint64,
+	rejectStores map[uint64]bool,
+) (bool, error) {
+	regionInfo, err := client.GetRegionByID(ctx, regionID)
+	if err != nil {
+		return false, err
+	}
+	if regionInfo == nil {
+		return false, nil
+	}
+	for _, peer := range regionInfo.Region.GetPeers() {
+		if rejectStores[peer.GetStoreId()] {
+			return true, nil
+		}
+	}
+	retryTimes := ctx.Value(retryTimes).(int)
+	if retryTimes > 10 {
+		log.Warn("get region info", zap.Stringer("region", regionInfo.Region))
+	}
+	return false, nil
+}
+
+func waitForRemoveRejectStores(
+	ctx context.Context,
+	client SplitClient,
+	regionInfo *RegionInfo,
+	rejectStores map[uint64]bool,
+) bool {
+	interval := RejectStoreCheckInterval
+	regionID := regionInfo.Region.GetId()
+	for i := 0; i < RejectStoreCheckRetryTimes; i++ {
+		ctx1 := context.WithValue(ctx, retryTimes, i)
+		ok, err := hasRejectStorePeer(ctx1, client, regionID, rejectStores)
+		if err != nil {
+			log.Warn("wait for rejecting store failed",
+				zap.Stringer("region", regionInfo.Region),
+				zap.Error(err))
+			return false
+		}
+		// The region has no peer in the rejected stores; done.
+		if !ok {
+			return true
+		}
+		interval = 2 * interval
+		if interval > RejectStoreMaxCheckInterval {
+			interval = RejectStoreMaxCheckInterval
+		}
+		time.Sleep(interval)
+	}
+
+	return false
+}
diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go
index bc4da9168..d1a738fdb 100644
--- a/pkg/restore/util_test.go
+++ b/pkg/restore/util_test.go
@@ -1,11 +1,17 @@
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
 package restore
 
 import (
+	"context"
+	"encoding/binary"
+
 	.
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" ) var _ = Suite(&testRestoreUtilSuite{}) @@ -103,3 +109,105 @@ func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { ) c.Assert(err, ErrorMatches, "unexpected rewrite rules") } + +func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { + peers := make([]*metapb.Peer, 1) + peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + + makeRegions := func(num uint64) (map[uint64]*RegionInfo, []*RegionInfo) { + regionsMap := make(map[uint64]*RegionInfo, num) + regions := make([]*RegionInfo, 0, num) + endKey := make([]byte, 8) + for i := uint64(0); i < num-1; i++ { + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: i + 1, + Peers: peers, + }, + } + + if i != 0 { + startKey := make([]byte, 8) + binary.BigEndian.PutUint64(startKey, i) + ri.Region.StartKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey = make([]byte, 8) + binary.BigEndian.PutUint64(endKey, i+1) + ri.Region.EndKey = codec.EncodeBytes([]byte{}, endKey) + + regionsMap[i] = ri + regions = append(regions, ri) + } + + if num == 1 { + endKey = []byte{} + } else { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: num, + Peers: peers, + StartKey: endKey, + EndKey: []byte{}, + }, + } + regionsMap[num] = ri + regions = append(regions, ri) + + return regionsMap, regions + } + + ctx := context.Background() + regionMap := make(map[uint64]*RegionInfo) + regions := []*RegionInfo{} + batch, err := paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(1) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(2) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(3) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[1:]) + + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), []byte{}, regions[6].Region.EndKey, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[:7]) + + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, regions[1].Region.EndKey, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[1:2]) + + _, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3) + c.Assert(err, ErrorMatches, "startKey >= endKey.*") +} diff --git a/pkg/backup/range_tree.go b/pkg/rtree/rtree.go similarity index 61% 
rename from pkg/backup/range_tree.go
rename to pkg/rtree/rtree.go
index 4d4b3c695..08b757af5 100644
--- a/pkg/backup/range_tree.go
+++ b/pkg/rtree/rtree.go
@@ -1,8 +1,10 @@
-package backup
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+
+package rtree
 
 import (
 	"bytes"
-	"encoding/hex"
+	"fmt"
 
 	"github.com/google/btree"
 	"github.com/pingcap/kvproto/pkg/backup"
@@ -15,10 +17,15 @@ type Range struct {
 	StartKey []byte
 	EndKey   []byte
 	Files    []*backup.File
-	Error    *backup.Error
 }
 
-func (rg *Range) intersect(
+// String formats a range to a string
+func (rg *Range) String() string {
+	return fmt.Sprintf("[%x %x]", rg.StartKey, rg.EndKey)
+}
+
+// Intersect returns the intersection of the range and [start, end).
+func (rg *Range) Intersect(
 	start, end []byte,
 ) (subStart, subEnd []byte, isIntersect bool) {
 	// empty mean the max end key
@@ -49,8 +56,8 @@ func (rg *Range) intersect(
 	return
 }
 
-// contains check if the range contains the given key, [start, end)
-func (rg *Range) contains(key []byte) bool {
+// Contains checks if the range contains the given key, [start, end)
+func (rg *Range) Contains(key []byte) bool {
 	start, end := rg.StartKey, rg.EndKey
 	return bytes.Compare(key, start) >= 0 &&
 		(len(end) == 0 || bytes.Compare(key, end) < 0)
@@ -65,31 +72,29 @@ func (rg *Range) Less(than btree.Item) bool {
 
 var _ btree.Item = &Range{}
 
-// RangeTree is the result of a backup task
+// RangeTree is a sorted tree of Ranges.
+// All the ranges it stores do not overlap.
 type RangeTree struct {
-	tree *btree.BTree
+	*btree.BTree
 }
 
-func newRangeTree() RangeTree {
+// NewRangeTree returns an empty range tree.
+func NewRangeTree() RangeTree {
 	return RangeTree{
-		tree: btree.New(32),
+		BTree: btree.New(32),
 	}
 }
 
-func (rangeTree *RangeTree) len() int {
-	return rangeTree.tree.Len()
-}
-
-// find is a helper function to find an item that contains the range start
+// Find is a helper function to find an item that contains the range start
 // key.
-func (rangeTree *RangeTree) find(rg *Range) *Range {
+func (rangeTree *RangeTree) Find(rg *Range) *Range {
 	var ret *Range
-	rangeTree.tree.DescendLessOrEqual(rg, func(i btree.Item) bool {
+	rangeTree.DescendLessOrEqual(rg, func(i btree.Item) bool {
 		ret = i.(*Range)
 		return false
 	})
-	if ret == nil || !ret.contains(rg.StartKey) {
+	if ret == nil || !ret.Contains(rg.StartKey) {
 		return nil
 	}
 
@@ -104,13 +109,13 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range {
 	// find() will return Range of range_a
 	// and both startKey of range_a and range_b are less than endKey of range_d,
 	// thus they are regarded as overlapped ranges.
-	found := rangeTree.find(rg)
+	found := rangeTree.Find(rg)
 	if found == nil {
 		found = rg
 	}
 
 	var overlaps []*Range
-	rangeTree.tree.AscendGreaterOrEqual(found, func(i btree.Item) bool {
+	rangeTree.AscendGreaterOrEqual(found, func(i btree.Item) bool {
 		over := i.(*Range)
 		if len(rg.EndKey) > 0 && bytes.Compare(rg.EndKey, over.StartKey) <= 0 {
 			return false
@@ -121,31 +126,57 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range {
 	return overlaps
 }
 
-func (rangeTree *RangeTree) update(rg *Range) {
-	overlaps := rangeTree.getOverlaps(rg)
+// Update inserts the range into the tree and deletes overlapping ranges.
+func (rangeTree *RangeTree) Update(rg Range) {
+	overlaps := rangeTree.getOverlaps(&rg)
 	// Range has backuped, overwrite overlapping range.
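+	// A later backup of the same key range supersedes the earlier one, so
+	// every overlapping entry is removed before the new range is inserted.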
 	for _, item := range overlaps {
 		log.Info("delete overlapping range",
 			zap.Binary("StartKey", item.StartKey),
 			zap.Binary("EndKey", item.EndKey),
 		)
-		rangeTree.tree.Delete(item)
+		rangeTree.Delete(item)
 	}
-	rangeTree.tree.ReplaceOrInsert(rg)
+	rangeTree.ReplaceOrInsert(&rg)
 }
 
-func (rangeTree *RangeTree) put(
+// Put forms a range and inserts it into the tree.
+func (rangeTree *RangeTree) Put(
 	startKey, endKey []byte, files []*backup.File,
 ) {
-	rg := &Range{
+	rg := Range{
 		StartKey: startKey,
 		EndKey:   endKey,
 		Files:    files,
 	}
-	rangeTree.update(rg)
+	rangeTree.Update(rg)
+}
+
+// InsertRange inserts a range into the range tree.
+// It returns a non-nil range if the insert replaced an overlapping range.
+func (rangeTree *RangeTree) InsertRange(rg Range) *Range {
+	out := rangeTree.ReplaceOrInsert(&rg)
+	if out == nil {
+		return nil
+	}
+	return out.(*Range)
+}
+
+// GetSortedRanges collects and returns sorted ranges.
+func (rangeTree *RangeTree) GetSortedRanges() []Range {
+	sortedRanges := make([]Range, 0, rangeTree.Len())
+	rangeTree.Ascend(func(rg btree.Item) bool {
+		if rg == nil {
+			return false
+		}
+		sortedRanges = append(sortedRanges, *rg.(*Range))
+		return true
+	})
+	return sortedRanges
 }
 
-func (rangeTree *RangeTree) getIncompleteRange(
+// GetIncompleteRange returns the missing ranges within [startKey, endKey).
+func (rangeTree *RangeTree) GetIncompleteRange(
 	startKey, endKey []byte,
 ) []Range {
 	if len(startKey) != 0 && bytes.Equal(startKey, endKey) {
@@ -155,14 +186,14 @@ func (rangeTree *RangeTree) getIncompleteRange(
 	requsetRange := Range{StartKey: startKey, EndKey: endKey}
 	lastEndKey := startKey
 	pviot := &Range{StartKey: startKey}
-	if first := rangeTree.find(pviot); first != nil {
+	if first := rangeTree.Find(pviot); first != nil {
 		pviot.StartKey = first.StartKey
 	}
-	rangeTree.tree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool {
+	rangeTree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool {
 		rg := i.(*Range)
 		if bytes.Compare(lastEndKey, rg.StartKey) < 0 {
 			start, end, isIntersect :=
-				requsetRange.intersect(lastEndKey, rg.StartKey)
+				requsetRange.Intersect(lastEndKey, rg.StartKey)
 			if isIntersect {
 				// There is a gap between the last item and the current item.
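+				// For example, with stored ranges [a, b) and [c, d),
+				// requesting [a, d) yields the incomplete range [b, c).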
incomplete = @@ -176,7 +207,7 @@ func (rangeTree *RangeTree) getIncompleteRange( // Check whether we need append the last range if !bytes.Equal(lastEndKey, endKey) && len(lastEndKey) != 0 && (len(endKey) == 0 || bytes.Compare(lastEndKey, endKey) < 0) { - start, end, isIntersect := requsetRange.intersect(lastEndKey, endKey) + start, end, isIntersect := requsetRange.Intersect(lastEndKey, endKey) if isIntersect { incomplete = append(incomplete, Range{StartKey: start, EndKey: end}) @@ -184,24 +215,3 @@ func (rangeTree *RangeTree) getIncompleteRange( } return incomplete } - -func (rangeTree *RangeTree) checkDupFiles() { - // Name -> SHA256 - files := make(map[string][]byte) - rangeTree.tree.Ascend(func(i btree.Item) bool { - rg := i.(*Range) - for _, f := range rg.Files { - old, ok := files[f.Name] - if ok { - log.Error("dup file", - zap.String("Name", f.Name), - zap.String("SHA256_1", hex.EncodeToString(old)), - zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), - ) - } else { - files[f.Name] = f.Sha256 - } - } - return true - }) -} diff --git a/pkg/backup/range_tree_test.go b/pkg/rtree/rtree_test.go similarity index 64% rename from pkg/backup/range_tree_test.go rename to pkg/rtree/rtree_test.go index a7c2d1cd1..d3e151e25 100644 --- a/pkg/backup/range_tree_test.go +++ b/pkg/rtree/rtree_test.go @@ -1,17 +1,6 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package backup +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +package rtree import ( "fmt" @@ -31,63 +20,19 @@ func newRange(start, end []byte) *Range { } } -func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { - rg := newRange([]byte("a"), []byte("c")) - - start, end, isIntersect := rg.intersect([]byte(""), []byte("")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("a")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("a"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("aa"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("aa")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("b"), []byte("c")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("b")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte{1}) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte("c"), []byte("")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) -} - func (s *testRangeTreeSuite) TestRangeTree(c *C) { - rangeTree := newRangeTree() - c.Assert(rangeTree.tree.Get(newRange([]byte(""), []byte(""))), IsNil) + rangeTree := NewRangeTree() + c.Assert(rangeTree.Get(newRange([]byte(""), []byte(""))), IsNil) search := func(key []byte) *Range { - rg := rangeTree.tree.Get(newRange(key, []byte(""))) + rg := rangeTree.Get(newRange(key, []byte(""))) if rg == nil { return nil } return rg.(*Range) } assertIncomplete := func(startKey, endKey []byte, ranges []Range) { - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) c.Logf("%#v %#v\n%#v\n%#v\n", startKey, endKey, incomplete, ranges) c.Assert(len(incomplete), Equals, len(ranges)) for idx, rg := range incomplete { @@ -111,8 +56,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { rangeC := newRange([]byte("c"), []byte("d")) rangeD := newRange([]byte("d"), []byte("")) - rangeTree.update(rangeA) - c.Assert(rangeTree.len(), Equals, 1) + rangeTree.Update(*rangeA) + c.Assert(rangeTree.Len(), Equals, 1) assertIncomplete([]byte("a"), []byte("b"), []Range{}) assertIncomplete([]byte(""), []byte(""), []Range{ @@ -120,8 +65,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { {StartKey: []byte("b"), EndKey: []byte("")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 2) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 2) assertIncomplete([]byte("a"), []byte("c"), []Range{ {StartKey: []byte("b"), EndKey: []byte("c")}, }) @@ -136,55 +81,99 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { }) c.Assert(search([]byte{}), IsNil) - c.Assert(search([]byte("a")), Equals, rangeA) + c.Assert(search([]byte("a")), DeepEquals, rangeA) c.Assert(search([]byte("b")), IsNil) - c.Assert(search([]byte("c")), Equals, rangeC) + 
c.Assert(search([]byte("c")), DeepEquals, rangeC) c.Assert(search([]byte("d")), IsNil) - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 3) - c.Assert(search([]byte("b")), Equals, rangeB) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 3) + c.Assert(search([]byte("b")), DeepEquals, rangeB) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, {StartKey: []byte("d"), EndKey: []byte("")}, }) - rangeTree.update(rangeD) - c.Assert(rangeTree.len(), Equals, 4) - c.Assert(search([]byte("d")), Equals, rangeD) + rangeTree.Update(*rangeD) + c.Assert(rangeTree.Len(), Equals, 4) + c.Assert(search([]byte("d")), DeepEquals, rangeD) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, }) // None incomplete for any range after insert range 0 - rangeTree.update(range0) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*range0) + c.Assert(rangeTree.Len(), Equals, 5) // Overwrite range B and C. rangeBD := newRange([]byte("b"), []byte("d")) - rangeTree.update(rangeBD) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeBD) + c.Assert(rangeTree.Len(), Equals, 4) assertAllComplete() // Overwrite range BD, c-d should be empty - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 4) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte("c"), EndKey: []byte("d")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 5) assertAllComplete() } +func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { + rg := newRange([]byte("a"), []byte("c")) + + start, end, isIntersect := rg.Intersect([]byte(""), []byte("")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("a")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("a"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("aa"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("aa")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("b"), []byte("c")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("b")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte{1}) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte("c"), []byte("")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) +} + func BenchmarkRangeTreeUpdate(b *testing.B) { - rangeTree := newRangeTree() + rangeTree := NewRangeTree() for i := 0; i < b.N; i++ { - item := &Range{ + item := Range{ StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))} - rangeTree.update(item) + 
rangeTree.Update(item) } } diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go index 2340467ba..c828f57a1 100644 --- a/pkg/storage/flags.go +++ b/pkg/storage/flags.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 2eb310c3a..4af3ea059 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -46,28 +48,11 @@ func (options *GCSBackendOptions) apply(gcs *backup.GCS) error { } func defineGCSFlags(flags *pflag.FlagSet) { - flags.String(gcsEndpointOption, "", "Set the GCS endpoint URL") - flags.String(gcsStorageClassOption, "", - `Specify the GCS storage class for objects. -If it is not set, objects uploaded are -followed by the default storage class of the bucket. -See https://cloud.google.com/storage/docs/storage-classes -for valid values.`) - flags.String(gcsPredefinedACL, "", - `Specify the GCS predefined acl for objects. -If it is not set, objects uploaded are -followed by the acl of bucket scope. -See https://cloud.google.com/storage/docs/access-control/lists#predefined-acl -for valid values.`) - flags.String(gcsCredentialsFile, "", - `Set the GCS credentials file path. -You can get one from -https://console.cloud.google.com/apis/credentials.`) - - _ = flags.MarkHidden(gcsEndpointOption) - _ = flags.MarkHidden(gcsStorageClassOption) - _ = flags.MarkHidden(gcsPredefinedACL) - _ = flags.MarkHidden(gcsCredentialsFile) + // TODO: remove experimental tag if it's stable + flags.String(gcsEndpointOption, "", "(experimental) Set the GCS endpoint URL") + flags.String(gcsStorageClassOption, "", "(experimental) Specify the GCS storage class for objects") + flags.String(gcsPredefinedACL, "", "(experimental) Specify the GCS predefined acl for objects") + flags.String(gcsCredentialsFile, "", "(experimental) Set the GCS credentials file path") } func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error { diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go index 10bb44371..60a26f616 100644 --- a/pkg/storage/gcs_test.go +++ b/pkg/storage/gcs_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/local.go b/pkg/storage/local.go index 77ca7f6a4..d2555a978 100644 --- a/pkg/storage/local.go +++ b/pkg/storage/local.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/local_unix.go b/pkg/storage/local_unix.go index be0050e83..aedf7c637 100644 --- a/pkg/storage/local_unix.go +++ b/pkg/storage/local_unix.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + // +build !windows package storage diff --git a/pkg/storage/local_windows.go b/pkg/storage/local_windows.go index a3ab2b784..cb784fad4 100644 --- a/pkg/storage/local_windows.go +++ b/pkg/storage/local_windows.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + // +build windows package storage diff --git a/pkg/storage/noop.go b/pkg/storage/noop.go index 17b1dea55..1ee698342 100644 --- a/pkg/storage/noop.go +++ b/pkg/storage/noop.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package storage import "context" diff --git a/pkg/storage/parse.go b/pkg/storage/parse.go index c470d5458..d75e7663d 100644 --- a/pkg/storage/parse.go +++ b/pkg/storage/parse.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/parse_test.go b/pkg/storage/parse_test.go index d72b8a5b3..3f1bc4d4f 100644 --- a/pkg/storage/parse_test.go +++ b/pkg/storage/parse_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -19,7 +21,7 @@ var _ = Suite(&testStorageSuite{}) func (r *testStorageSuite) TestCreateStorage(c *C) { _, err := ParseBackend("1invalid:", nil) - c.Assert(err, ErrorMatches, "parse 1invalid:: first path segment in URL cannot contain colon") + c.Assert(err, ErrorMatches, "parse (.*)1invalid:(.*): first path segment in URL cannot contain colon") _, err = ParseBackend("net:storage", nil) c.Assert(err, ErrorMatches, "storage net not support yet") diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 8e04769b5..bf24b9a2b 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( @@ -102,19 +104,14 @@ func (options *S3BackendOptions) apply(s3 *backup.S3) error { } func defineS3Flags(flags *pflag.FlagSet) { - flags.String(s3EndpointOption, "", "Set the S3 endpoint URL, please specify the http or https scheme explicitly") - flags.String(s3RegionOption, "", "Set the S3 region, e.g. us-east-1") - flags.String(s3StorageClassOption, "", "Set the S3 storage class, e.g. STANDARD") - flags.String(s3SSEOption, "", "Set the S3 server-side encryption algorithm, e.g. AES256") - flags.String(s3ACLOption, "", "Set the S3 canned ACLs, e.g. authenticated-read") - flags.String(s3ProviderOption, "", "Set the S3 provider, e.g. aws, alibaba, ceph") - - _ = flags.MarkHidden(s3EndpointOption) - _ = flags.MarkHidden(s3RegionOption) - _ = flags.MarkHidden(s3StorageClassOption) - _ = flags.MarkHidden(s3SSEOption) - _ = flags.MarkHidden(s3ACLOption) - _ = flags.MarkHidden(s3ProviderOption) + // TODO: remove experimental tag if it's stable + flags.String(s3EndpointOption, "", + "(experimental) Set the S3 endpoint URL, please specify the http or https scheme explicitly") + flags.String(s3RegionOption, "", "(experimental) Set the S3 region, e.g. us-east-1") + flags.String(s3StorageClassOption, "", "(experimental) Set the S3 storage class, e.g. STANDARD") + flags.String(s3SSEOption, "", "(experimental) Set the S3 server-side encryption algorithm, e.g. AES256") + flags.String(s3ACLOption, "", "(experimental) Set the S3 canned ACLs, e.g. authenticated-read") + flags.String(s3ProviderOption, "", "(experimental) Set the S3 provider, e.g. aws, alibaba, ceph") } func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error { diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index 3eaf1c206..bd35b6faf 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package storage import ( @@ -72,7 +74,7 @@ func (r *testStorageSuite) TestApply(c *C) { options: S3BackendOptions{ Endpoint: "!http:12345", }, - errMsg: "parse !http:12345: first path segment in URL cannot contain colon", + errMsg: "parse (.*)!http:12345(.*): first path segment in URL cannot contain colon", errReturn: true, }, } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index f9ae368ae..91143ca54 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index cd5aac6c6..76dd8a121 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import ( @@ -25,7 +27,7 @@ const ( type LogCollector interface { SetUnit(unit string) - CollectSuccessUnit(name string, arg interface{}) + CollectSuccessUnit(name string, unitCount int, arg interface{}) CollectFailureUnit(name string, reason error) @@ -33,28 +35,59 @@ type LogCollector interface { CollectInt(name string, t int) + SetSuccessStatus(success bool) + Summary(name string) } -var collector = newLogCollector() +type logFunc func(msg string, fields ...zap.Field) + +var collector LogCollector = newLogCollector(log.Info) + +// InitCollector initilize global collector instance. +func InitCollector( // revive:disable-line:flag-parameter + hasLogFile bool, +) { + logF := log.L().Info + if hasLogFile { + conf := new(log.Config) + // Always duplicate summary to stdout. + logger, _, err := log.InitLogger(conf) + if err == nil { + logF = func(msg string, fields ...zap.Field) { + logger.Info(msg, fields...) + log.Info(msg, fields...) 
+ } + } + } + collector = newLogCollector(logF) +} type logCollector struct { - mu sync.Mutex - unit string - unitCount int - successCosts map[string]time.Duration - successData map[string]uint64 - failureReasons map[string]error - fields []zap.Field + mu sync.Mutex + unit string + successUnitCount int + failureUnitCount int + successCosts map[string]time.Duration + successData map[string]uint64 + failureReasons map[string]error + durations map[string]time.Duration + ints map[string]int + successStatus bool + + log logFunc } -func newLogCollector() LogCollector { +func newLogCollector(log logFunc) LogCollector { return &logCollector{ - unitCount: 0, - fields: make([]zap.Field, 0), - successCosts: make(map[string]time.Duration), - successData: make(map[string]uint64), - failureReasons: make(map[string]error), + successUnitCount: 0, + failureUnitCount: 0, + successCosts: make(map[string]time.Duration), + successData: make(map[string]uint64), + failureReasons: make(map[string]error), + durations: make(map[string]time.Duration), + ints: make(map[string]int), + log: log, } } @@ -64,7 +97,7 @@ func (tc *logCollector) SetUnit(unit string) { tc.unit = unit } -func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { +func (tc *logCollector) CollectSuccessUnit(name string, unitCount int, arg interface{}) { tc.mu.Lock() defer tc.mu.Unlock() @@ -72,7 +105,7 @@ func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { case time.Duration: if _, ok := tc.successCosts[name]; !ok { tc.successCosts[name] = v - tc.unitCount++ + tc.successUnitCount += unitCount } else { tc.successCosts[name] += v } @@ -90,26 +123,33 @@ func (tc *logCollector) CollectFailureUnit(name string, reason error) { defer tc.mu.Unlock() if _, ok := tc.failureReasons[name]; !ok { tc.failureReasons[name] = reason - tc.unitCount++ + tc.failureUnitCount++ } } func (tc *logCollector) CollectDuration(name string, t time.Duration) { tc.mu.Lock() defer tc.mu.Unlock() - tc.fields = append(tc.fields, zap.Duration(name, t)) + tc.durations[name] += t } func (tc *logCollector) CollectInt(name string, t int) { tc.mu.Lock() defer tc.mu.Unlock() - tc.fields = append(tc.fields, zap.Int(name, t)) + tc.ints[name] += t +} + +func (tc *logCollector) SetSuccessStatus(success bool) { + tc.mu.Lock() + defer tc.mu.Unlock() + tc.successStatus = success } func (tc *logCollector) Summary(name string) { tc.mu.Lock() defer func() { - tc.fields = tc.fields[:0] + tc.durations = make(map[string]time.Duration) + tc.ints = make(map[string]int) tc.successCosts = make(map[string]time.Duration) tc.failureReasons = make(map[string]error) tc.mu.Unlock() @@ -119,27 +159,25 @@ func (tc *logCollector) Summary(name string) { switch tc.unit { case BackupUnit: msg = fmt.Sprintf("total backup ranges: %d, total success: %d, total failed: %d", - tc.unitCount, len(tc.successCosts), len(tc.failureReasons)) - if len(tc.failureReasons) != 0 { - msg += ", failed ranges" - } + tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount) case RestoreUnit: - msg = fmt.Sprintf("total restore tables: %d, total success: %d, total failed: %d", - tc.unitCount, len(tc.successCosts), len(tc.failureReasons)) - if len(tc.failureReasons) != 0 { - msg += ", failed tables" - } + msg = fmt.Sprintf("total restore files: %d, total success: %d, total failed: %d", + tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount) + } + + logFields := make([]zap.Field, 0, len(tc.durations)+len(tc.ints)) + for key, val := range 
tc.durations { + logFields = append(logFields, zap.Duration(key, val)) + } + for key, val := range tc.ints { + logFields = append(logFields, zap.Int(key, val)) } - logFields := tc.fields - if len(tc.failureReasons) != 0 { - names := make([]string, 0, len(tc.failureReasons)) - for name := range tc.failureReasons { - // logFields = append(logFields, zap.NamedError(name, reason)) - names = append(names, name) + if len(tc.failureReasons) != 0 || !tc.successStatus { + for unitName, reason := range tc.failureReasons { + logFields = append(logFields, zap.String("unitName", unitName), zap.Error(reason)) } - logFields = append(logFields, zap.Strings(msg, names)) - log.Info(name+" summary", logFields...) + log.Info(name+" Failed summary : "+msg, logFields...) return } totalCost := time.Duration(0) @@ -162,7 +200,7 @@ func (tc *logCollector) Summary(name string) { msg += fmt.Sprintf(", %s: %d", name, data) } - log.Info(name+" summary: "+msg, logFields...) + tc.log(name+" Success summary: "+msg, logFields...) } // SetLogCollector allow pass LogCollector outside diff --git a/pkg/summary/collector_test.go b/pkg/summary/collector_test.go new file mode 100644 index 000000000..165232f55 --- /dev/null +++ b/pkg/summary/collector_test.go @@ -0,0 +1,49 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package summary + +import ( + "testing" + "time" + + . "github.com/pingcap/check" + "go.uber.org/zap" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testCollectorSuite{}) + +type testCollectorSuite struct { +} + +func (suit *testCollectorSuite) TestSumDurationInt(c *C) { + fields := []zap.Field{} + logger := func(msg string, fs ...zap.Field) { + fields = append(fields, fs...) + } + col := newLogCollector(logger) + col.CollectDuration("a", time.Second) + col.CollectDuration("b", time.Second) + col.CollectDuration("b", time.Second) + col.CollectInt("c", 2) + col.CollectInt("c", 2) + col.SetSuccessStatus(true) + col.Summary("foo") + + c.Assert(len(fields), Equals, 3) + assertContains := func(field zap.Field) { + for _, f := range fields { + if f.Key == field.Key { + c.Assert(f, DeepEquals, field) + return + } + } + c.Error(fields, "do not contain", field) + } + assertContains(zap.Duration("a", time.Second)) + assertContains(zap.Duration("b", 2*time.Second)) + assertContains(zap.Int("c", 4)) +} diff --git a/pkg/summary/summary.go b/pkg/summary/summary.go index 88d3fb143..852e936a9 100644 --- a/pkg/summary/summary.go +++ b/pkg/summary/summary.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import "time" @@ -8,8 +10,8 @@ func SetUnit(unit string) { } // CollectSuccessUnit collects success time costs -func CollectSuccessUnit(name string, arg interface{}) { - collector.CollectSuccessUnit(name, arg) +func CollectSuccessUnit(name string, unitCount int, arg interface{}) { + collector.CollectSuccessUnit(name, unitCount, arg) } // CollectFailureUnit collects fail reason @@ -27,6 +29,11 @@ func CollectInt(name string, t int) { collector.CollectInt(name, t) } +// SetSuccessStatus sets final success status +func SetSuccessStatus(success bool) { + collector.SetSuccessStatus(success) +} + // Summary outputs summary log func Summary(name string) { collector.Summary(name) diff --git a/pkg/task/backup.go b/pkg/task/backup.go index b9613cd56..c02b0e8c1 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -1,23 +1,37 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package task import ( "context" + "strconv" "time" "github.com/pingcap/errors" + kvproto "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/log" + "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" "github.com/spf13/pflag" + "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" - "github.com/pingcap/br/pkg/utils" ) const ( flagBackupTimeago = "timeago" + flagBackupTS = "backupts" flagLastBackupTS = "lastbackupts" + + defaultBackupConcurrency = 4 ) // BackupConfig is the configuration specific for backup tasks. @@ -25,6 +39,7 @@ type BackupConfig struct { Config TimeAgo time.Duration `json:"time-ago" toml:"time-ago"` + BackupTS uint64 `json:"backup-ts" toml:"backup-ts"` LastBackupTS uint64 `json:"last-backup-ts" toml:"last-backup-ts"` } @@ -34,8 +49,11 @@ func DefineBackupFlags(flags *pflag.FlagSet) { flagBackupTimeago, 0, "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") - flags.Uint64(flagLastBackupTS, 0, "the last time backup ts") - _ = flags.MarkHidden(flagLastBackupTS) + // TODO: remove experimental tag if it's stable + flags.Uint64(flagLastBackupTS, 0, "(experimental) the last time backup ts,"+ + " use for incremental backup, support TSO only") + flags.String(flagBackupTS, "", "the backup ts support TSO or datetime,"+ + " e.g. '400036290571534337', '2018-05-11 01:42:23'") } // ParseFromFlags parses the backup-related flags from the flag set. @@ -52,14 +70,27 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } + backupTS, err := flags.GetString(flagBackupTS) + if err != nil { + return errors.Trace(err) + } + cfg.BackupTS, err = parseTSString(backupTS) + if err != nil { + return errors.Trace(err) + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { return errors.Trace(err) } + if cfg.Config.Concurrency == 0 { + cfg.Config.Concurrency = defaultBackupConcurrency + } return nil } // RunBackup starts a backup task inside the current goroutine. 
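+// The added glue.Glue parameter supplies progress reporting (StartProgress)
+// and kv store opening (Open), so callers can plug in their own runtime.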
-func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { +func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -71,7 +102,7 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { if err != nil { return err } - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) if err != nil { return err } @@ -85,18 +116,37 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } - backupTS, err := client.GetTS(ctx, cfg.TimeAgo) + backupTS, err := client.GetTS(ctx, cfg.TimeAgo, cfg.BackupTS) if err != nil { return err } - defer summary.Summary(cmdName) - ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( mgr.GetDomain(), mgr.GetTiKV(), tableFilter, backupTS) if err != nil { return err } + // nothing to backup + if ranges == nil { + return nil + } + + ddlJobs := make([]*model.Job, 0) + if cfg.LastBackupTS > 0 { + if backupTS < cfg.LastBackupTS { + log.Error("LastBackupTS is larger than current TS") + return errors.New("LastBackupTS is larger than current TS") + } + err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS) + if err != nil { + log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) + return err + } + ddlJobs, err = backup.GetBackupDDLJobs(mgr.GetDomain(), cfg.LastBackupTS, backupTS) + if err != nil { + return err + } + } // The number of regions need to backup approximateRegions := 0 @@ -113,22 +163,29 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { // Backup // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) + + req := kvproto.BackupRequest{ + StartVersion: cfg.LastBackupTS, + EndVersion: backupTS, + RateLimit: cfg.RateLimit, + Concurrency: cfg.Concurrency, + } err = client.BackupRanges( - ctx, ranges, cfg.LastBackupTS, backupTS, cfg.RateLimit, cfg.Concurrency, updateCh) + ctx, ranges, req, updateCh) if err != nil { return err } // Backup has finished - close(updateCh) + updateCh.Close() // Checksum backupSchemasConcurrency := backup.DefaultSchemaConcurrency if backupSchemas.Len() < backupSchemasConcurrency { backupSchemasConcurrency = backupSchemas.Len() } - updateCh = utils.StartProgress( + updateCh = g.StartProgress( ctx, "Checksum", int64(backupSchemas.Len()), !cfg.LogProgress) backupSchemas.SetSkipChecksum(!cfg.Checksum) backupSchemas.Start( @@ -139,19 +196,54 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum mismatch!") + if cfg.LastBackupTS == 0 { + var valid bool + valid, err = client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + return errors.Errorf("mismatched checksum") + } + + } else { + // Since we don't support checksum for incremental data, fast checksum should be skipped. + log.Info("Skip fast checksum in incremental backup") } // Checksum has finished - close(updateCh) + updateCh.Close() - err = client.SaveBackupMeta(ctx) + err = client.SaveBackupMeta(ctx, ddlJobs) if err != nil { return err } + + // Set task summary to success status. 
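+	// Any early return above leaves this flag false, so the collector
+	// reports a failed summary unless the whole task completed.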
+ summary.SetSuccessStatus(true) return nil } + +// parseTSString port from tidb setSnapshotTS +func parseTSString(ts string) (uint64, error) { + if len(ts) == 0 { + return 0, nil + } + if tso, err := strconv.ParseUint(ts, 10, 64); err == nil { + return tso, nil + } + + loc := time.Local + sc := &stmtctx.StatementContext{ + TimeZone: loc, + } + t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp) + if err != nil { + return 0, errors.Trace(err) + } + t1, err := t.Time.GoTime(loc) + if err != nil { + return 0, errors.Trace(err) + } + return variable.GoTimeToTS(t1), nil +} diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go new file mode 100644 index 000000000..fefcc2cf1 --- /dev/null +++ b/pkg/task/backup_raw.go @@ -0,0 +1,148 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package task + +import ( + "bytes" + "context" + + "github.com/pingcap/errors" + kvproto "github.com/pingcap/kvproto/pkg/backup" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/rtree" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagKeyFormat = "format" + flagTiKVColumnFamily = "cf" + flagStartKey = "start" + flagEndKey = "end" +) + +// RawKvConfig is the common config for rawkv backup and restore. +type RawKvConfig struct { + Config + + StartKey []byte `json:"start-key" toml:"start-key"` + EndKey []byte `json:"end-key" toml:"end-key"` + CF string `json:"cf" toml:"cf"` +} + +// DefineRawBackupFlags defines common flags for the backup command. +func DefineRawBackupFlags(command *cobra.Command) { + command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, support raw|escaped|hex") + command.Flags().StringP(flagTiKVColumnFamily, "", "default", "backup specify cf, correspond to tikv cf") + command.Flags().StringP(flagStartKey, "", "", "backup raw kv start key, key is inclusive") + command.Flags().StringP(flagEndKey, "", "", "backup raw kv end key, key is exclusive") +} + +// ParseFromFlags parses the backup-related flags from the flag set. +func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error { + format, err := flags.GetString(flagKeyFormat) + if err != nil { + return err + } + start, err := flags.GetString(flagStartKey) + if err != nil { + return err + } + cfg.StartKey, err = utils.ParseKey(format, start) + if err != nil { + return err + } + end, err := flags.GetString(flagEndKey) + if err != nil { + return err + } + cfg.EndKey, err = utils.ParseKey(format, end) + if err != nil { + return err + } + + if bytes.Compare(cfg.StartKey, cfg.EndKey) >= 0 { + return errors.New("endKey must be greater than startKey") + } + + cfg.CF, err = flags.GetString(flagTiKVColumnFamily) + if err != nil { + return err + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { + return errors.Trace(err) + } + return nil +} + +// RunBackupRaw starts a backup task inside the current goroutine. 
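+// Unlike RunBackup, it backs up a single raw key range in one column
+// family, so no schema, DDL job, or checksum handling is involved.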
+func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := backup.NewBackupClient(ctx, mgr) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } + + backupRange := rtree.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey} + + // The number of regions need to backup + approximateRegions, err := mgr.GetRegionCount(ctx, backupRange.StartKey, backupRange.EndKey) + if err != nil { + return err + } + + summary.CollectInt("backup total regions", approximateRegions) + + // Backup + // Redirect to log if there is no log file to avoid unreadable output. + updateCh := g.StartProgress( + ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) + + req := kvproto.BackupRequest{ + StartVersion: 0, + EndVersion: 0, + RateLimit: cfg.RateLimit, + Concurrency: cfg.Concurrency, + IsRawKv: true, + Cf: cfg.CF, + } + + err = client.BackupRange(ctx, backupRange.StartKey, backupRange.EndKey, req, updateCh) + if err != nil { + return err + } + // Backup has finished + updateCh.Close() + + // Checksum + err = client.SaveBackupMeta(ctx, nil) + if err != nil { + return err + } + + // Set task summary to success status. + summary.SetSuccessStatus(true) + return nil +} diff --git a/pkg/task/backup_test.go b/pkg/task/backup_test.go new file mode 100644 index 000000000..6bd60515b --- /dev/null +++ b/pkg/task/backup_test.go @@ -0,0 +1,36 @@ +package task + +import ( + "testing" + "time" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testBackupSuite{}) + +func TestT(t *testing.T) { + TestingT(t) +} + +type testBackupSuite struct{} + +func (s *testBackupSuite) TestParseTSString(c *C) { + var ( + ts uint64 + err error + ) + + ts, err = parseTSString("") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 0) + + ts, err = parseTSString("400036290571534337") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 400036290571534337) + + _, offset := time.Now().Local().Zone() + ts, err = parseTSString("2018-05-11 01:42:23") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 400032515489792000-(offset*1000)<<18) +} diff --git a/pkg/task/common.go b/pkg/task/common.go index 2433d94b9..e14b5e0e1 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -1,7 +1,10 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package task import ( "context" + "crypto/tls" "fmt" "regexp" "strings" @@ -9,12 +12,15 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" + pd "github.com/pingcap/pd/v3/client" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/store/tikv" "github.com/spf13/cobra" "github.com/spf13/pflag" + "go.etcd.io/etcd/pkg/transport" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/utils" ) @@ -49,6 +55,25 @@ type TLSConfig struct { Key string `json:"key" toml:"key"` } +// IsEnabled checks if TLS open or not +func (tls *TLSConfig) IsEnabled() bool { + return tls.CA != "" +} + +// ToTLSConfig generate tls.Config +func (tls *TLSConfig) ToTLSConfig() (*tls.Config, error) { + tlsInfo := transport.TLSInfo{ + CertFile: tls.Cert, + KeyFile: tls.Key, + TrustedCAFile: tls.CA, + } + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, errors.Trace(err) + } + return tlsConfig, nil +} + // Config is the common configuration for all BRIE tasks. type Config struct { storage.BackendOptions @@ -70,16 +95,21 @@ type Config struct { // DefineCommonFlags defines the flags common to all BRIE commands. func DefineCommonFlags(flags *pflag.FlagSet) { flags.BoolP(flagSendCreds, "c", true, "Whether send credentials to tikv") - flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "local:///path/to/save"`) + flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "s3://bucket/path/prefix"`) flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address") flags.String(flagCA, "", "CA certificate path for TLS connection") flags.String(flagCert, "", "Certificate path for TLS connection") flags.String(flagKey, "", "Private key path for TLS connection") flags.Uint64(flagRateLimit, 0, "The rate limit of the task, MB/s per node") - flags.Uint32(flagConcurrency, 4, "The size of thread pool on each node that executes the task") flags.Bool(flagChecksum, true, "Run checksum at end of task") + // Default concurrency is different for backup and restore. + // Leave it 0 and let them adjust the value. + flags.Uint32(flagConcurrency, 0, "The size of thread pool on each node that executes the task") + // It may confuse users , so just hide it. + _ = flags.MarkHidden(flagConcurrency) + flags.Uint64(flagRateLimitUnit, utils.MB, "The unit of rate limit") _ = flags.MarkHidden(flagRateLimitUnit) @@ -178,18 +208,39 @@ func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { } // newMgr creates a new mgr at the given PD address. -func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { +func newMgr( + ctx context.Context, + g glue.Glue, + pds []string, + tlsConfig TLSConfig, + storeBehavior conn.StoreBehavior, +) (*conn.Mgr, error) { + var ( + tlsConf *tls.Config + err error + ) pdAddress := strings.Join(pds, ",") if len(pdAddress) == 0 { return nil, errors.New("pd address can not be empty") } + securityOption := pd.SecurityOption{} + if tlsConfig.IsEnabled() { + securityOption.CAPath = tlsConfig.CA + securityOption.CertPath = tlsConfig.Cert + securityOption.KeyPath = tlsConfig.Key + tlsConf, err = tlsConfig.ToTLSConfig() + if err != nil { + return nil, err + } + } + // Disable GC because TiDB enables GC already. 
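+	// That is, BR's embedded TiDB layer must not start its own GC worker,
+	// since the cluster's TiDB instances already run GC.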
- store, err := tikv.Driver{}.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) + store, err := g.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress), securityOption) if err != nil { return nil, err } - return conn.NewMgr(ctx, pdAddress, store.(tikv.Storage)) + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption, storeBehavior) } // GetStorage gets the storage backend from the config. @@ -211,13 +262,14 @@ func GetStorage( // ReadBackupMeta reads the backupmeta file from the storage. func ReadBackupMeta( ctx context.Context, + fileName string, cfg *Config, ) (*backup.StorageBackend, storage.ExternalStorage, *backup.BackupMeta, error) { u, s, err := GetStorage(ctx, cfg) if err != nil { return nil, nil, nil, err } - metaData, err := s.Read(ctx, utils.MetaFile) + metaData, err := s.Read(ctx, fileName) if err != nil { return nil, nil, nil, errors.Annotate(err, "load backupmeta failed") } diff --git a/pkg/task/restore.go b/pkg/task/restore.go index a56a1d6da..9dce5139e 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package task import ( @@ -6,18 +8,24 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/log" + "github.com/pingcap/parser/model" "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/config" "github.com/spf13/pflag" "go.uber.org/zap" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/rtree" + "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) const ( - flagOnline = "online" + flagOnline = "online" + flagNoSchema = "no-schema" ) var schedulers = map[string]struct{}{ @@ -30,18 +38,27 @@ var schedulers = map[string]struct{}{ "shuffle-hot-region-scheduler": {}, } +const ( + defaultRestoreConcurrency = 128 + maxRestoreBatchSizeLimit = 256 +) + // RestoreConfig is the configuration specific for restore tasks. type RestoreConfig struct { Config - Online bool `json:"online" toml:"online"` + Online bool `json:"online" toml:"online"` + NoSchema bool `json:"no-schema" toml:"no-schema"` } // DefineRestoreFlags defines common flags for the restore command. func DefineRestoreFlags(flags *pflag.FlagSet) { - flags.Bool("online", false, "Whether online when restore") - // TODO remove hidden flag if it's stable - _ = flags.MarkHidden("online") + // TODO remove experimental tag if it's stable + flags.Bool(flagOnline, false, "(experimental) Whether online when restore") + flags.Bool(flagNoSchema, false, "skip creating schemas and tables, reuse existing empty ones") + + // Do not expose this flag + _ = flags.MarkHidden(flagNoSchema) } // ParseFromFlags parses the restore-related flags from the flag set. @@ -51,35 +68,59 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } - return cfg.Config.ParseFromFlags(flags) + cfg.NoSchema, err = flags.GetBool(flagNoSchema) + if err != nil { + return errors.Trace(err) + } + err = cfg.Config.ParseFromFlags(flags) + if err != nil { + return errors.Trace(err) + } + if cfg.Config.Concurrency == 0 { + cfg.Config.Concurrency = defaultRestoreConcurrency + } + return nil } // RunRestore starts a restore task inside the current goroutine. 
-func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { +func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) if err != nil { return err } defer mgr.Close() - client, err := restore.NewRestoreClient(ctx, mgr.GetPDClient(), mgr.GetTiKV()) + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) if err != nil { return err } defer client.Close() + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } client.SetRateLimit(cfg.RateLimit) client.SetConcurrency(uint(cfg.Concurrency)) if cfg.Online { client.EnableOnline() } + if cfg.NoSchema { + client.EnableSkipCreateSQL() + } + err = client.LoadRestoreStores(ctx) + if err != nil { + return err + } - defer summary.Summary(cmdName) - - u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) if err != nil { return err } @@ -87,14 +128,14 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { return err } - files, tables, err := filterRestoreFiles(client, cfg) + if client.IsRawKvMode() { + return errors.New("cannot do transactional restore from raw kv data") + } + + files, tables, dbs, err := filterRestoreFiles(client, cfg) if err != nil { return err } - if len(files) == 0 { - return errors.New("all files are filtered out from the backup archive, nothing to restore") - } - summary.CollectInt("restore files", len(files)) var newTS uint64 if client.IsIncremental() { @@ -103,10 +144,55 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { return err } } + ddlJobs := restore.FilterDDLJobs(client.GetDDLJobs(), tables) + if err != nil { + return err + } + // execute DDL first + + // set max-index-length before execute DDLs and create tables + // we set this value to max(3072*4), otherwise we might not restore table + // when upstream and downstream both set this value greater than default(3072) + conf := config.GetGlobalConfig() + conf.MaxIndexLength = config.DefMaxOfMaxIndexLength + config.StoreGlobalConfig(conf) + log.Warn("set max-index-length to max(3072*4) to skip check index length in DDL") + + err = client.ExecDDLs(ddlJobs) + if err != nil { + return errors.Trace(err) + } + + // nothing to restore, maybe only ddl changes in incremental restore + if len(files) == 0 { + log.Info("all files are filtered out from the backup archive, nothing to restore") + return nil + } + + for _, db := range dbs { + err = client.CreateDatabase(db.Info) + if err != nil { + return err + } + } + rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) if err != nil { return err } + placementRules, err := client.GetPlacementRules(cfg.PD) + if err != nil { + return err + } + + err = client.RemoveTiFlashReplica(tables, newTables, placementRules) + if err != nil { + return err + } + + defer func() { + _ = client.RecoverTiFlashReplica(tables) + }() ranges, err := restore.ValidateFileRanges(files, rewriteRules) if err != nil { @@ -114,20 +200,27 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { } summary.CollectInt("restore ranges", len(ranges)) + if err = 
splitPrepareWork(ctx, client, newTables); err != nil { + return err + } + + ranges = restore.AttachFilesToRanges(files, ranges) + // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, cmdName, // Split/Scatter + Download/Ingest int64(len(ranges)+len(files)), !cfg.LogProgress) - err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) + clusterCfg, err := restorePreWork(ctx, client, mgr) if err != nil { - log.Error("split regions failed", zap.Error(err)) return err } + // Do not reset timestamp if we are doing incremental restore, because + // we are not allowed to decrease timestamp. if !client.IsIncremental() { if err = client.ResetTS(cfg.PD); err != nil { log.Error("reset pd TS failed", zap.Error(err)) @@ -135,65 +228,108 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { } } - removedSchedulers, err := restorePreWork(ctx, client, mgr) + // Restore sst files in batch. + batchSize := int(cfg.Concurrency) + if batchSize > maxRestoreBatchSizeLimit { + batchSize = maxRestoreBatchSizeLimit // 256 + } + + tiflashStores, err := conn.GetAllTiKVStores(ctx, client.GetPDClient(), conn.TiFlashOnly) if err != nil { - return err + return errors.Trace(err) + } + rejectStoreMap := make(map[uint64]bool) + for _, store := range tiflashStores { + rejectStoreMap[store.GetId()] = true } - err = client.RestoreFiles(files, rewriteRules, updateCh) - // always run the post-work even on error, so we don't stuck in the import mode or emptied schedulers - postErr := restorePostWork(ctx, client, mgr, removedSchedulers) + for { + if len(ranges) == 0 { + break + } + if batchSize > len(ranges) { + batchSize = len(ranges) + } + var rangeBatch []rtree.Range + ranges, rangeBatch = ranges[batchSize:], ranges[0:batchSize:batchSize] + + // Split regions by the given rangeBatch. + err = restore.SplitRanges(ctx, client, rangeBatch, rewriteRules, updateCh) + if err != nil { + log.Error("split regions failed", zap.Error(err)) + return err + } + + // Collect related files in the given rangeBatch. + fileBatch := make([]*backup.File, 0, 2*len(rangeBatch)) + for _, rg := range rangeBatch { + fileBatch = append(fileBatch, rg.Files...) + } + + // After split, we can restore backup files. + err = client.RestoreFiles(fileBatch, rewriteRules, rejectStoreMap, updateCh) + if err != nil { + break + } + } + + // Always run the post-work even on error, so we don't stuck in the import + // mode or emptied schedulers + if errRestorePostWork := restorePostWork(ctx, client, mgr, clusterCfg); err == nil { + err = errRestorePostWork + } + + if errSplitPostWork := splitPostWork(ctx, client, newTables); err == nil { + err = errSplitPostWork + } + + // If any error happened, return now, don't execute checksum. if err != nil { return err } - if postErr != nil { - return postErr - } // Restore has finished. - close(updateCh) + updateCh.Close() // Checksum - updateCh = utils.StartProgress( + updateCh = g.StartProgress( ctx, "Checksum", int64(len(newTables)), !cfg.LogProgress) err = client.ValidateChecksum( ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) if err != nil { return err } - close(updateCh) + updateCh.Close() + // Set task summary to success status. 
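The batching loop above leans on Go's three-index slice expression: `ranges[0:batchSize:batchSize]` caps the batch's capacity as well as its length, so appending to the batch allocates a fresh backing array instead of overwriting the remaining ranges it shares memory with. A standalone illustration of the idiom (toy data, not BR types):

```go
package main

import "fmt"

func main() {
	ranges := []int{1, 2, 3, 4, 5}
	batchSize := 2

	var batch []int
	// Full-slice expression: len(batch) == cap(batch) == batchSize.
	ranges, batch = ranges[batchSize:], ranges[0:batchSize:batchSize]

	// Because the capacity is capped, this append reallocates rather
	// than clobbering ranges[0] in the shared backing array.
	batch = append(batch, 99)

	fmt.Println(batch)  // [1 2 99]
	fmt.Println(ranges) // [3 4 5], untouched
}
```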
+ summary.SetSuccessStatus(true) return nil } func filterRestoreFiles( client *restore.Client, cfg *RestoreConfig, -) (files []*backup.File, tables []*utils.Table, err error) { +) (files []*backup.File, tables []*utils.Table, dbs []*utils.Database, err error) { tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, db := range client.GetDatabases() { createdDatabase := false for _, table := range db.Tables { - if !tableFilter.Match(&filter.Table{Schema: db.Schema.Name.O, Name: table.Schema.Name.O}) { + if !tableFilter.Match(&filter.Table{Schema: db.Info.Name.O, Name: table.Info.Name.O}) { continue } if !createdDatabase { - if err = client.CreateDatabase(db.Schema); err != nil { - return nil, nil, err - } + dbs = append(dbs, db) createdDatabase = true } - files = append(files, table.Files...) tables = append(tables, table) } } - return } @@ -252,3 +388,82 @@ func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers } return nil } + +func splitPrepareWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error { + err := client.SetupPlacementRules(ctx, tables) + if err != nil { + log.Error("setup placement rules failed", zap.Error(err)) + return errors.Trace(err) + } + + err = client.WaitPlacementSchedule(ctx, tables) + if err != nil { + log.Error("wait placement schedule failed", zap.Error(err)) + return errors.Trace(err) + } + return nil +} + +func splitPostWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error { + err := client.ResetPlacementRules(ctx, tables) + if err != nil { + return errors.Trace(err) + } + + err = client.ResetRestoreLabels(ctx) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// RunRestoreTiflashReplica restores the replica of tiflash saved in the last restore. +func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + // Load saved backupmeta + _, _, backupMeta, err := ReadBackupMeta(ctx, utils.SavedMetaFile, &cfg.Config) + if err != nil { + return err + } + dbs, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return err + } + se, err := restore.NewDB(g, mgr.GetTiKV()) + if err != nil { + return err + } + + tables := make([]*utils.Table, 0) + for _, db := range dbs { + tables = append(tables, db.Tables...) + } + updateCh := g.StartProgress( + ctx, "RecoverTiflashReplica", int64(len(tables)), !cfg.LogProgress) + for _, t := range tables { + log.Info("get table", zap.Stringer("name", t.Info.Name), + zap.Int("replica", t.TiFlashReplicas)) + if t.TiFlashReplicas > 0 { + err := se.AlterTiflashReplica(ctx, t, t.TiFlashReplicas) + if err != nil { + return err + } + updateCh.Inc() + } + } + updateCh.Close() + summary.CollectInt("recover tables", len(tables)) + + // Set task summary to success status. + summary.SetSuccessStatus(true) + return nil +} diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go new file mode 100644 index 000000000..03e987456 --- /dev/null +++ b/pkg/task/restore_raw.go @@ -0,0 +1,134 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +package task + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +// RestoreRawConfig is the configuration specific for raw kv restore tasks. +type RestoreRawConfig struct { + RawKvConfig + + Online bool `json:"online" toml:"online"` +} + +// DefineRawRestoreFlags defines common flags for the backup command. +func DefineRawRestoreFlags(command *cobra.Command) { + command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, support raw|escaped|hex") + command.Flags().StringP(flagTiKVColumnFamily, "", "default", "restore specify cf, correspond to tikv cf") + command.Flags().StringP(flagStartKey, "", "", "restore raw kv start key, key is inclusive") + command.Flags().StringP(flagEndKey, "", "", "restore raw kv end key, key is exclusive") + + command.Flags().Bool(flagOnline, false, "Whether online when restore") + // TODO remove hidden flag if it's stable + _ = command.Flags().MarkHidden(flagOnline) +} + +// ParseFromFlags parses the backup-related flags from the flag set. +func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.RawKvConfig.ParseFromFlags(flags) +} + +// RunRestoreRaw starts a raw kv restore task inside the current goroutine. +func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) (err error) { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) + if err != nil { + return err + } + defer client.Close() + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + if !client.IsRawKvMode() { + return errors.New("cannot do raw restore from transactional data") + } + + files, err := client.GetFilesInRawRange(cfg.StartKey, cfg.EndKey, cfg.CF) + if err != nil { + return errors.Trace(err) + } + + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + ranges, err := restore.ValidateFileRanges(files, nil) + if err != nil { + return errors.Trace(err) + } + + // Redirect to log if there is no log file to avoid unreadable output. + // TODO: How to show progress? 
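As in the transactional path, the progress bar here is sized at `len(ranges)+len(files)`: one unit per range for split/scatter plus one unit per file for download/ingest. A toy sketch of that accounting, with a hypothetical counter standing in for the glue-provided progress unit:

```go
package main

import "fmt"

// progress is a hypothetical stand-in for the Inc/Close surface
// that the glue layer's progress unit exposes.
type progress struct{ done, total int64 }

func (p *progress) Inc()   { p.done++ }
func (p *progress) Close() { fmt.Printf("finished %d/%d units\n", p.done, p.total) }

func main() {
	ranges, files := 3, 7
	p := &progress{total: int64(ranges + files)}
	for i := 0; i < ranges; i++ {
		p.Inc() // one unit per split/scatter of a range
	}
	for i := 0; i < files; i++ {
		p.Inc() // one unit per downloaded/ingested file
	}
	p.Close()
}
```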
+ updateCh := g.StartProgress( + ctx, + "Raw Restore", + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, nil, updateCh) + if err != nil { + return errors.Trace(err) + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return errors.Trace(err) + } + defer func() { + errPostWork := restorePostWork(ctx, client, mgr, removedSchedulers) + if err == nil { + err = errPostWork + } + }() + + err = client.RestoreRaw(cfg.StartKey, cfg.EndKey, files, updateCh) + if err != nil { + return errors.Trace(err) + } + + // Restore has finished. + updateCh.Close() + + // Set task summary to success status. + summary.SetSuccessStatus(true) + return nil +} diff --git a/pkg/utils/key.go b/pkg/utils/key.go new file mode 100644 index 000000000..8caeb2833 --- /dev/null +++ b/pkg/utils/key.go @@ -0,0 +1,90 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package utils + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "strings" + + "github.com/pingcap/errors" +) + +// ParseKey parse key by given format +func ParseKey(format, key string) ([]byte, error) { + switch format { + case "raw": + return []byte(key), nil + case "escaped": + return unescapedKey(key) + case "hex": + key, err := hex.DecodeString(key) + if err != nil { + return nil, errors.WithStack(err) + } + return key, nil + } + return nil, errors.New("unknown format") +} + +// Ref PD: https://github.com/pingcap/pd/blob/master/tools/pd-ctl/pdctl/command/region_command.go#L334 +func unescapedKey(text string) ([]byte, error) { + var buf []byte + r := bytes.NewBuffer([]byte(text)) + for { + c, err := r.ReadByte() + if err != nil { + if err != io.EOF { + return nil, errors.WithStack(err) + } + break + } + if c != '\\' { + buf = append(buf, c) + continue + } + n := r.Next(1) + if len(n) == 0 { + return nil, io.EOF + } + // See: https://golang.org/ref/spec#Rune_literals + if idx := strings.IndexByte(`abfnrtv\'"`, n[0]); idx != -1 { + buf = append(buf, []byte("\a\b\f\n\r\t\v\\'\"")[idx]) + continue + } + + switch n[0] { + case 'x': + fmt.Sscanf(string(r.Next(2)), "%02x", &c) + buf = append(buf, c) + default: + n = append(n, r.Next(2)...) + _, err := fmt.Sscanf(string(n), "%03o", &c) + if err != nil { + return nil, errors.WithStack(err) + } + buf = append(buf, c) + } + } + return buf, nil +} + +// CompareEndKey compared two keys that BOTH represent the EXCLUSIVE ending of some range. An empty end key is the very +// end, so an empty key is greater than any other keys. +// Please note that this function is not applicable if any one argument is not an EXCLUSIVE ending of a range. +func CompareEndKey(a, b []byte) int { + if len(a) == 0 { + if len(b) == 0 { + return 0 + } + return 1 + } + + if len(b) == 0 { + return -1 + } + + return bytes.Compare(a, b) +} diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go new file mode 100644 index 000000000..3e20bae24 --- /dev/null +++ b/pkg/utils/key_test.go @@ -0,0 +1,54 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package utils + +import ( + "encoding/hex" + + . 
"github.com/pingcap/check" +) + +type testKeySuite struct{} + +var _ = Suite(&testKeySuite{}) + +func (r *testKeySuite) TestParseKey(c *C) { + rawKey := "1234" + parsedKey, err := ParseKey("raw", rawKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte(rawKey)) + + escapedKey := "\\a\\x1" + parsedKey, err = ParseKey("escaped", escapedKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte("\a\x01")) + + hexKey := hex.EncodeToString([]byte("1234")) + parsedKey, err = ParseKey("hex", hexKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte("1234")) + + _, err = ParseKey("notSupport", rawKey) + c.Assert(err, ErrorMatches, "*unknown format*") + +} + +func (r *testKeySuite) TestCompareEndKey(c *C) { + res := CompareEndKey([]byte("1"), []byte("2")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte("1"), []byte("1")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte("2"), []byte("1")) + c.Assert(res, Greater, 0) + + res = CompareEndKey([]byte("1"), []byte("")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte(""), []byte("")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte(""), []byte("1")) + c.Assert(res, Greater, 0) +} diff --git a/pkg/utils/mock_cluster_test.go b/pkg/utils/mock_cluster_test.go deleted file mode 100644 index 42cacae9c..000000000 --- a/pkg/utils/mock_cluster_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package utils - -import ( - . "github.com/pingcap/check" - "github.com/pingcap/tidb/util/testleak" -) - -var _ = Suite(&testMockClusterSuite{}) - -type testMockClusterSuite struct { - mock *MockCluster -} - -func (s *testMockClusterSuite) SetUpSuite(c *C) { - var err error - s.mock, err = NewMockCluster() - c.Assert(err, IsNil) -} - -func (s *testMockClusterSuite) TearDownSuite(c *C) { - testleak.AfterTest(c)() -} - -func (s *testMockClusterSuite) TestSmoke(c *C) { - c.Assert(s.mock.Start(), IsNil) - s.mock.Stop() -} diff --git a/pkg/utils/pd.go b/pkg/utils/pd.go new file mode 100644 index 000000000..a35cf88e0 --- /dev/null +++ b/pkg/utils/pd.go @@ -0,0 +1,107 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +package utils + +import ( + "bytes" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/pd/v3/pkg/codec" + "github.com/pingcap/pd/v3/server/schedule/placement" + "github.com/pingcap/tidb/tablecodec" +) + +const ( + resetTSURL = "/pd/api/v1/admin/reset-ts" + placementRuleURL = "/pd/api/v1/config/rules" +) + +// ResetTS resets the timestamp of PD to a bigger value +func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error { + req, err := json.Marshal(struct { + TSO string `json:"tso,omitempty"` + }{TSO: fmt.Sprintf("%d", ts)}) + if err != nil { + return err + } + cli := &http.Client{Timeout: 30 * time.Second} + prefix := "http://" + if tlsConf != nil { + prefix = "https://" + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + reqURL := prefix + pdAddr + resetTSURL + resp, err := cli.Post(reqURL, "application/json", strings.NewReader(string(req))) + if err != nil { + return errors.Trace(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusForbidden { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(resp.Body) + return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err) + } + return nil +} + +// GetPlacementRules return the current placement rules +func GetPlacementRules(pdAddr string, tlsConf *tls.Config) ([]placement.Rule, error) { + cli := &http.Client{Timeout: 30 * time.Second} + prefix := "http://" + if tlsConf != nil { + prefix = "https://" + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + reqURL := prefix + pdAddr + placementRuleURL + resp, err := cli.Get(reqURL) + if err != nil { + return nil, errors.Trace(err) + } + defer resp.Body.Close() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(resp.Body) + if err != nil { + return nil, errors.Trace(err) + } + if resp.StatusCode == http.StatusPreconditionFailed { + return []placement.Rule{}, nil + } + if resp.StatusCode != http.StatusOK { + return nil, errors.Errorf("get placement rules failed: resp=%v, err=%v, code=%d", buf.String(), err, resp.StatusCode) + } + var rules []placement.Rule + err = json.Unmarshal(buf.Bytes(), &rules) + if err != nil { + return nil, errors.Trace(err) + } + return rules, nil +} + +// SearchPlacementRule returns the placement rule matched to the table or nil +func SearchPlacementRule(tableID int64, placementRules []placement.Rule, role placement.PeerRoleType) *placement.Rule { + for _, rule := range placementRules { + key, err := hex.DecodeString(rule.StartKeyHex) + if err != nil { + continue + } + _, decoded, err := codec.DecodeBytes(key) + if err != nil { + continue + } + if rule.Role == role && tableID == tablecodec.DecodeTableID(decoded) { + return &rule + } + } + return nil +} diff --git a/pkg/utils/progress.go b/pkg/utils/progress.go index 8c66093f0..da6b20364 100644 --- a/pkg/utils/progress.go +++ b/pkg/utils/progress.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( @@ -55,7 +57,7 @@ func (pp *ProgressPrinter) goPrintProgress( bar.Set(pb.Color, true) bar.SetWriter(&wrappedWriter{name: pp.name}) } else { - tmpl := `{{string . "barName" | red}} {{ bar . "<" "-" (cycle . "-" "\\" "|" "/" ) "." ">"}} {{percent .}}` + tmpl := `{{string . "barName" | green}} {{ bar . "<" "-" (cycle . 
"-" "\\" "|" "/" ) "." ">"}} {{percent .}}` bar = pb.ProgressBarTemplate(tmpl).Start64(pp.total) bar.Set("barName", pp.name) } diff --git a/pkg/utils/progress_test.go b/pkg/utils/progress_test.go index 7c1d9c947..0d76abd8f 100644 --- a/pkg/utils/progress_test.go +++ b/pkg/utils/progress_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go index a8f446764..1dbbcdad2 100644 --- a/pkg/utils/retry.go +++ b/pkg/utils/retry.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index 67d28132f..5ac439e36 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -1,17 +1,16 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( "bytes" - "context" "encoding/json" "strings" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/util/sqlexec" ) const ( @@ -19,28 +18,31 @@ const ( MetaFile = "backupmeta" // MetaJSONFile represents backup meta json file name MetaJSONFile = "backupmeta.json" + // SavedMetaFile represents saved meta file name for recovering later + SavedMetaFile = "backupmeta.bak" ) // Table wraps the schema and files of a table. type Table struct { - Db *model.DBInfo - Schema *model.TableInfo - Crc64Xor uint64 - TotalKvs uint64 - TotalBytes uint64 - Files []*backup.File + Db *model.DBInfo + Info *model.TableInfo + Crc64Xor uint64 + TotalKvs uint64 + TotalBytes uint64 + Files []*backup.File + TiFlashReplicas int } // Database wraps the schema and tables of a database. type Database struct { - Schema *model.DBInfo + Info *model.DBInfo Tables []*Table } // GetTable returns a table of the database by name. func (db *Database) GetTable(name string) *Table { for _, table := range db.Tables { - if table.Schema.Name.String() == name { + if table.Info.Name.String() == name { return table } } @@ -61,7 +63,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { db, ok := databases[dbInfo.Name.String()] if !ok { db = &Database{ - Schema: dbInfo, + Info: dbInfo, Tables: make([]*Table, 0), } databases[dbInfo.Name.String()] = db @@ -93,12 +95,13 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { } } table := &Table{ - Db: dbInfo, - Schema: tableInfo, - Crc64Xor: schema.Crc64Xor, - TotalKvs: schema.TotalKvs, - TotalBytes: schema.TotalBytes, - Files: tableFiles, + Db: dbInfo, + Info: tableInfo, + Crc64Xor: schema.Crc64Xor, + TotalKvs: schema.TotalKvs, + TotalBytes: schema.TotalBytes, + Files: tableFiles, + TiFlashReplicas: int(schema.TiflashReplicas), } db.Tables = append(db.Tables, table) } @@ -106,36 +109,6 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { return databases, nil } -// ResultSetToStringSlice changes the RecordSet to [][]string. 
port from tidb -func ResultSetToStringSlice(ctx context.Context, s session.Session, rs sqlexec.RecordSet) ([][]string, error) { - rows, err := session.GetRows4Test(ctx, s, rs) - if err != nil { - return nil, err - } - err = rs.Close() - if err != nil { - return nil, err - } - sRows := make([][]string, len(rows)) - for i := range rows { - row := rows[i] - iRow := make([]string, row.Len()) - for j := 0; j < row.Len(); j++ { - if row.IsNull(j) { - iRow[j] = "" - } else { - d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) - iRow[j], err = d.ToString() - if err != nil { - return nil, err - } - } - } - sRows[i] = iRow - } - return sRows, nil -} - // EncloseName formats name in sql func EncloseName(name string) string { return "`" + strings.ReplaceAll(name, "`", "``") + "`" diff --git a/pkg/utils/schema_test.go b/pkg/utils/schema_test.go index 336b6d4f8..22456be83 100644 --- a/pkg/utils/schema_test.go +++ b/pkg/utils/schema_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go deleted file mode 100644 index a4ca5f5b5..000000000 --- a/pkg/utils/tso.go +++ /dev/null @@ -1,37 +0,0 @@ -package utils - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/pingcap/errors" -) - -const ( - resetTSURL = "/pd/api/v1/admin/reset-ts" -) - -// ResetTS resets the timestamp of PD to a bigger value -func ResetTS(pdAddr string, ts uint64) error { - req, err := json.Marshal(struct { - TSO string `json:"tso,omitempty"` - }{TSO: fmt.Sprintf("%d", ts)}) - if err != nil { - return err - } - // TODO: Support TLS - reqURL := "http://" + pdAddr + resetTSURL - resp, err := http.Post(reqURL, "application/json", strings.NewReader(string(req))) - if err != nil { - return errors.Trace(err) - } - if resp.StatusCode != 200 && resp.StatusCode != 403 { - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(resp.Body) - return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err) - } - return nil -} diff --git a/pkg/utils/unit.go b/pkg/utils/unit.go index a12dcb6c2..253d97eb6 100644 --- a/pkg/utils/unit.go +++ b/pkg/utils/unit.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils // unit of storage diff --git a/pkg/utils/unit_test.go b/pkg/utils/unit_test.go index 5b3c00530..6cf89e316 100644 --- a/pkg/utils/unit_test.go +++ b/pkg/utils/unit_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index f82e28c69..ff8affa7c 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/version.go b/pkg/utils/version.go index 13a3c7a92..e3d46e301 100644 --- a/pkg/utils/version.go +++ b/pkg/utils/version.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/worker.go b/pkg/utils/worker.go index a77bae090..2d800ddcd 100644 --- a/pkg/utils/worker.go +++ b/pkg/utils/worker.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/tests/README.md b/tests/README.md index 9f307a8a6..fbb018505 100644 --- a/tests/README.md +++ b/tests/README.md @@ -11,6 +11,7 @@ programs. 
 * `bin/pd-server`
 * `bin/pd-ctl`
 * `bin/go-ycsb`
+ * `bin/minio`

 The versions must be ≥2.1.0 as usual.

@@ -18,6 +19,7 @@ programs.

 * `mysql` (the CLI client)
 * `curl`
+ * `s3cmd`

 3. The user executing the tests must have permission to create the folder
 `/tmp/backup_restore_test`. All test artifacts will be written into this folder.

@@ -32,7 +34,7 @@ Run `make integration_test` to execute the integration tests. This command will
 2. Check that all 6 required executables and `br` executable exist
 3. Execute `tests/run.sh`

-If the first tow steps are done before, you could also run `tests/run.sh` directly.
+If the first two steps are done before, you could also run `tests/run.sh` directly.
 This script will

 1. Start PD, TiKV and TiDB in background with local storage
@@ -45,4 +47,4 @@ The script should exit with a nonzero error code on failure.

 Several convenient commands are provided:

-* `run_sql ` — Executes an SQL query on the TiDB database
\ No newline at end of file
+* `run_sql ` — Executes an SQL query on the TiDB database
diff --git a/tests/_utils/run_services b/tests/_utils/run_services
new file mode 100644
index 000000000..f31152932
--- /dev/null
+++ b/tests/_utils/run_services
@@ -0,0 +1,203 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+TEST_DIR=/tmp/backup_restore_test
+
+PD_ADDR="127.0.0.1:2379"
+TIDB_IP="127.0.0.1"
+TIDB_PORT="4000"
+TIDB_ADDR="127.0.0.1:4000"
+TIDB_STATUS_ADDR="127.0.0.1:10080"
+# actual tikv_addrs are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2016"
+TIKV_STATUS_ADDR="127.0.0.1:2018"
+TIKV_COUNT=4
+
+stop_services() {
+    killall -9 tikv-server || true
+    killall -9 pd-server || true
+    killall -9 tidb-server || true
+
+    find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true
+}
+
+start_services() {
+    stop_services
+
+    TIDB_CONFIG="${1-tests}/config/tidb.toml"
+    TIKV_CONFIG="${1-tests}/config/tikv.toml"
+
+    echo "Starting PD..."
+    mkdir -p "$TEST_DIR/pd"
+    bin/pd-server \
+        --client-urls "http://$PD_ADDR" \
+        --log-file "$TEST_DIR/pd.log" \
+        --data-dir "$TEST_DIR/pd" &
+    # wait until PD is online...
+    i=0
+    while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to start PD'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiKV..."
+    for i in $(seq $TIKV_COUNT); do
+        mkdir -p "$TEST_DIR/tikv${i}"
+        bin/tikv-server \
+            --pd "$PD_ADDR" \
+            -A "$TIKV_ADDR$i" \
+            --status-addr "$TIKV_STATUS_ADDR$i" \
+            --log-file "$TEST_DIR/tikv${i}.log" \
+            -C "$TIKV_CONFIG" \
+            -s "$TEST_DIR/tikv${i}" &
+    done
+
+    echo "Waiting for TiKV to initialize..."
+    while ! curl -sf "http://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to initialize TiKV cluster'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiDB..."
+    bin/tidb-server \
+        -P 4000 \
+        --status 10080 \
+        --store tikv \
+        --path "$PD_ADDR" \
+        --config "$TIDB_CONFIG" \
+        --log-file "$TEST_DIR/tidb.log" &
+
+    echo "Verifying TiDB is started..."
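Every readiness check in this helper follows the same shape, including the TiDB check that comes next: poll an HTTP endpoint with curl and give up after a bounded number of attempts. The pattern, condensed into a hypothetical helper (endpoint and retry limit are placeholders):

```sh
wait_for_ready() {
    url="$1"; max_tries="${2:-20}"
    i=0
    # -sf makes curl quiet and treats HTTP errors as failures.
    while ! curl -o /dev/null -sf "$url"; do
        i=$((i+1))
        if [ "$i" -gt "$max_tries" ]; then
            echo "service at $url failed to start"
            return 1
        fi
        sleep 3
    done
}
```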
+    i=0
+    while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do
+        i=$((i+1))
+        if [ "$i" -gt 50 ]; then
+            echo 'Failed to start TiDB'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    i=0
+    while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to bootstrap cluster'
+            exit 1
+        fi
+        sleep 3
+    done
+}
+
+start_services_withTLS() {
+    stop_services
+
+    PD_CONFIG="$1/config/pd.toml"
+    TIDB_CONFIG="$1/config/tidb.toml"
+    TIKV_CONFIG="$1/config/tikv.toml"
+
+    echo $PD_CONFIG
+    echo $TIDB_CONFIG
+    echo $TIKV_CONFIG
+
+    echo "Starting PD..."
+    bin/pd-server \
+        --client-urls "https://$PD_ADDR" \
+        --log-file "$TEST_DIR/pd.log" \
+        --config "$PD_CONFIG" \
+        --data-dir "$TEST_DIR/pd" &
+    # wait until PD is online...
+    i=0
+    while ! curl --cacert $1/certificates/ca.pem \
+        --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        -o /dev/null -sf "https://$PD_ADDR/pd/api/v1/version"; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to start PD'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiKV..."
+    for i in $(seq $TIKV_COUNT); do
+        bin/tikv-server \
+            --pd "$PD_ADDR" \
+            -A "$TIKV_ADDR$i" \
+            --log-file "$TEST_DIR/tikv${i}.log" \
+            -C "$TIKV_CONFIG" \
+            -s "$TEST_DIR/tikv${i}" &
+    done
+
+    echo "Waiting for TiKV to initialize..."
+    while ! curl --cacert $1/certificates/ca.pem \
+        --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        -sf "https://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to initialize TiKV cluster'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiDB..."
+    bin/tidb-server \
+        -P 4000 \
+        --status 10080 \
+        --store tikv \
+        --config "$TIDB_CONFIG" \
+        --path "$PD_ADDR" \
+        --log-file "$TEST_DIR/tidb.log" &
+
+    echo "Verifying TiDB is started..."
+    i=0
+    while ! curl --cacert $1/certificates/ca.pem \
+        --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        -o /dev/null -sf "https://$TIDB_IP:10080/status"; do
+        i=$((i+1))
+        if [ "$i" -gt 50 ]; then
+            echo 'Failed to start TiDB'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    i=0
+    while ! curl --cacert $1/certificates/ca.pem \
+        --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        "https://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do
+        i=$((i+1))
+        if [ "$i" -gt 20 ]; then
+            echo 'Failed to bootstrap cluster'
+            exit 1
+        fi
+        sleep 3
+    done
+}
diff --git a/tests/br_alter_pk_server/config/tidb.toml b/tests/br_alter_pk_server/config/tidb.toml
new file mode 100644
index 000000000..30b7d4869
--- /dev/null
+++ b/tests/br_alter_pk_server/config/tidb.toml
@@ -0,0 +1,8 @@
+# config of tidb
+
+# Schema lease duration
+# There are a lot of DDLs in the tests; setting this
+# to 360s to test whether BR shuts down gracefully.
+lease = "360s"
+
+alter-primary-key = true
diff --git a/tests/br_alter_pk_server/config/tikv.toml b/tests/br_alter_pk_server/config/tikv.toml
new file mode 100644
index 000000000..edcd02a98
--- /dev/null
+++ b/tests/br_alter_pk_server/config/tikv.toml
@@ -0,0 +1,14 @@
+# config of tikv
+
+[coprocessor]
+region-max-keys = 20
+region-split-keys = 12
+
+[rocksdb]
+max-open-files = 4096
+[raftdb]
+max-open-files = 4096
+[raftstore]
+# true (default value) for high reliability, this can prevent data loss on power failure.
+sync-log = false +capacity = "10GB" diff --git a/tests/br_alter_pk_server/run.sh b/tests/br_alter_pk_server/run.sh new file mode 100755 index 000000000..6485a43be --- /dev/null +++ b/tests/br_alter_pk_server/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu + +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/../_utils/run_services + +DB="$TEST_NAME" + +# prepare database +echo "Restart cluster with alter-primary-key = true" +start_services "$cur" + +run_sql "drop schema if exists $DB;" +run_sql "create schema $DB;" + +run_sql "create table $DB.a (a int primary key, b int unique);" +run_sql "insert into $DB.a values (42, 42);" + +# backup +run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" + +# restore +run_sql "drop schema $DB;" +run_br --pd $PD_ADDR restore db --db "$DB" -s "local://$TEST_DIR/$DB" + +run_sql "drop schema $DB;" +echo "Restart service with alter-primary-key = false" +start_services diff --git a/tests/br_db_online/run.sh b/tests/br_db_online/run.sh new file mode 100755 index 000000000..95c3121d4 --- /dev/null +++ b/tests/br_db_online/run.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" + +run_sql "CREATE DATABASE $DB;" + +run_sql "CREATE TABLE $DB.usertable1 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" + +run_sql "CREATE TABLE $DB.usertable2 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");" + +# backup db +echo "backup start..." +run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 + +run_sql "DROP DATABASE $DB;" + +# restore db +echo "restore start..." +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online + +table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l) +if [ "$table_count" -ne "2" ];then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_db_online_newkv/run.sh b/tests/br_db_online_newkv/run.sh new file mode 100755 index 000000000..d8c3f15ff --- /dev/null +++ b/tests/br_db_online_newkv/run.sh @@ -0,0 +1,77 @@ +#!/bin/sh +# +# Copyright 2020 PingCAP, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.usertable2 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+# enable placement rules
+echo "config set enable-placement-rules true" | pd-ctl
+
+# add new tikv for restore
+# actual tikv_addrs are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2017"
+TIKV_STATUS_ADDR="127.0.0.1:2019"
+TIKV_COUNT=3
+
+echo "Starting restore TiKV..."
+for i in $(seq $TIKV_COUNT); do
+    tikv-server \
+        --pd "$PD_ADDR" \
+        -A "$TIKV_ADDR$i" \
+        --status-addr "$TIKV_STATUS_ADDR$i" \
+        --log-file "$TEST_DIR/restore-tikv${i}.log" \
+        -C "tests/config/restore-tikv.toml" \
+        -s "$TEST_DIR/restore-tikv${i}" &
+done
+sleep 5
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+echo "config set enable-placement-rules false" | pd-ctl
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_db_skip/run.sh b/tests/br_db_skip/run.sh
new file mode 100755
index 000000000..e126447c6
--- /dev/null
+++ b/tests/br_db_skip/run.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+# backup db
+echo "backup start..."
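The br_db_online_newkv script above captures the online-restore recipe: enable PD placement rules, bring up the TiKV stores that will receive the restored regions, restore with `--online`, then switch the rules back off. Reduced to its essential sequence (paths and variables as in the tests):

```sh
# Placement rules let PD schedule restored regions onto the new stores.
echo "config set enable-placement-rules true" | pd-ctl

# With --online, BR pauses the relevant schedulers itself during restore.
run_br restore db --db "$DB" -s "local://$TEST_DIR/$DB" --pd "$PD_ADDR" --online

echo "config set enable-placement-rules false" | pd-ctl
```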
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+run_sql "CREATE DATABASE $DB;"
+# restoring with no-schema must fail while the tables do not exist
+echo "restore start, this must fail..."
+fail=false
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --no-schema=true || fail=true
+if $fail; then
+    # Error: [schema:1146]Table 'br_db_skip.usertable1' doesn't exist
+    echo "TEST: [$TEST_NAME] restore $DB with no-schema failed as expected"
+else
+    echo "TEST: [$TEST_NAME] restore $DB with no-schema did not fail"
+    exit 1
+fi
+
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+echo "restore start, this must succeed..."
+fail=false
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --no-schema=true || fail=true
+if $fail; then
+    echo "TEST: [$TEST_NAME] restore $DB with no-schema failed"
+    exit 1
+else
+    echo "TEST: [$TEST_NAME] restore $DB with no-schema succeeded"
+fi
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh
index 1e40415d7..93c5b28fb 100755
--- a/tests/br_full_ddl/run.sh
+++ b/tests/br_full_ddl/run.sh
@@ -36,7 +36,10 @@ done

 # backup full
 echo "backup start..."
-br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG
+# Do not log to terminal
+unset BR_LOG_TO_TERM
+run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG || cat $LOG
+BR_LOG_TO_TERM=1

 checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs)

@@ -50,7 +53,7 @@ run_sql "DROP DATABASE $DB;"

 # restore full
 echo "restore start..."
-br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR
+run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR

 row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')

diff --git a/tests/br_full_index/run.sh b/tests/br_full_index/run.sh
index 5069035e6..bb2486802 100755
--- a/tests/br_full_index/run.sh
+++ b/tests/br_full_index/run.sh
@@ -36,7 +36,10 @@ done

 # backup full
 echo "backup start..."
-run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG
+# Do not log to terminal
+unset BR_LOG_TO_TERM
+run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG || cat $LOG
+BR_LOG_TO_TERM=1

 checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs)

diff --git a/tests/br_incremental/run.sh b/tests/br_incremental/run.sh
index bb6a42efb..b6a6061de 100755
--- a/tests/br_incremental/run.sh
+++ b/tests/br_incremental/run.sh
@@ -20,55 +20,38 @@ TABLE="usertable"

 run_sql "CREATE DATABASE $DB;"
 go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB
-
-row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+row_count_ori_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')

 # full backup
 echo "full backup start..."
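The br_incremental rewrite below stops reusing a single backup directory; the full and incremental backups live under separate paths, chained by feeding the full backup's end-version into `--lastbackupts`. The chaining step on its own:

```sh
# Read where the full backup ended...
last_backup_ts=$(br validate decode --field="end-version" \
    -s "local://$TEST_DIR/$DB/full" | tail -n1)

# ...and start the incremental backup from exactly that timestamp.
run_br backup table --db "$DB" -t "$TABLE" --pd "$PD_ADDR" \
    -s "local://$TEST_DIR/$DB/inc" --lastbackupts "$last_backup_ts"
```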
-run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 - -run_sql "DROP TABLE $DB.$TABLE;" - -# full restore -echo "full restore start..." -run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -if [ "$row_count_ori" -ne "$row_count_new" ];then - echo "TEST: [$TEST_NAME] full br failed!" - exit 1 -fi +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 go-ycsb run mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) - -# clean up data -rm -rf $TEST_DIR/$DB - # incremental backup echo "incremental backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts - -start_ts=$(br validate decode --field="start-version" -s "local://$TEST_DIR/$DB" | tail -n1) -end_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts +row_count_ori_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -echo "start version: $start_ts, end version: $end_ts" +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${row_count_ori_full}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi # incremental restore echo "incremental restore start..." -run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -echo "[original] row count: $row_count_ori, [after br] row count: $row_count_new" - -if [ "$row_count_ori" -eq "$row_count_new" ];then - echo "TEST: [$TEST_NAME] successed!" -else - echo "TEST: [$TEST_NAME] failed!" +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${row_count_ori_inc}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" exit 1 fi diff --git a/tests/br_incremental_ddl/run.sh b/tests/br_incremental_ddl/run.sh new file mode 100755 index 000000000..d9a88709b --- /dev/null +++ b/tests/br_incremental_ddl/run.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 +# run ddls +echo "run ddls..." +run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" +run_sql "DROP TABLE ${DB}.${TABLE}1;" +run_sql "DROP DATABASE ${DB};" +run_sql "CREATE DATABASE ${DB};" +run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));" +run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};" +run_sql "TRUNCATE TABLE ${DB}.${TABLE};" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('$i');" +done +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');" + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_index/run.sh b/tests/br_incremental_index/run.sh new file mode 100755 index 000000000..f4b4b9de7 --- /dev/null +++ b/tests/br_incremental_index/run.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." 
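br_incremental_ddl above also fixes the restore order: the full backup is restored first, and only then the incremental one, which replays the recorded DDL jobs (rename, drop, truncate) before ingesting data. The order, in isolation:

```sh
# The incremental restore assumes the full restore's schema and data
# are already present; swapping these two steps would fail.
run_br restore table --db "$DB" --table "$TABLE" --pd "$PD_ADDR" \
    -s "local://$TEST_DIR/$DB/full"
run_br restore table --db "$DB" --table "$TABLE" --pd "$PD_ADDR" \
    -s "local://$TEST_DIR/$DB/inc"
```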
+# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE} VALUES ($i);" +done + +# full backup +echo "backup full start..." +run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" & +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +wait +# run ddls +echo "run ddls..." +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c2 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c3 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} DROP COLUMN c3;"; +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE} VALUES (1, 1);" +row_count_insert=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check insert count +if [ "${row_count_insert}" != "$(expr $row_count_inc + 1)" ];then + echo "TEST: [$TEST_NAME] insert record fail on database $DB" + exit 1 +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_only_ddl/run.sh b/tests/br_incremental_only_ddl/run.sh new file mode 100755 index 000000000..f525acda4 --- /dev/null +++ b/tests/br_incremental_only_ddl/run.sh @@ -0,0 +1,72 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 +# run ddls +echo "run ddls..." 
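These scripts run under `set -eu`, so a br invocation that is expected to fail cannot be called bare; the `|| fail=true` idiom used below captures the exit status without tripping errexit:

```sh
set -eu

fail=false
command_expected_to_fail || fail=true  # hypothetical command

if $fail; then
    echo "failed as expected"
else
    echo "unexpectedly succeeded"
    exit 1
fi
```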
+run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" +run_sql "DROP TABLE ${DB}.${TABLE}1;" +run_sql "DROP DATABASE ${DB};" +run_sql "CREATE DATABASE ${DB};" +run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));" +run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};" +run_sql "TRUNCATE TABLE ${DB}.${TABLE};" + +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" + +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." +fail=false +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR || fail=true +if $fail; then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +else + echo "TEST: [$TEST_NAME] successed!" +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_same_table/run.sh b/tests/br_incremental_same_table/run.sh new file mode 100755 index 000000000..797806837 --- /dev/null +++ b/tests/br_incremental_same_table/run.sh @@ -0,0 +1,86 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" +DB_COUNT=3 + +echo "load data..." + +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +# run ddls + +# create 3 databases, each db has one table with same name +for i in $(seq $DB_COUNT); do + # create database + run_sql "CREATE DATABASE $DB$i;" + # create table + run_sql "CREATE TABLE IF NOT EXISTS $DB$i.${TABLE} (c1 INT);" + # insert records + for j in $(seq $ROW_COUNT); do + run_sql "INSERT INTO $DB$i.${TABLE}(c1) VALUES ($j);" + done +done + +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/inc" --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB$i;" +done + +# full restore +echo "full restore start..." 
+run_br restore full -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi + +# incremental restore only DB2.Table +echo "incremental restore start..." +run_br restore table --db ${DB}2 --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM ${DB}2.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE IF EXISTS $DB$i;" +done diff --git a/tests/br_key_locked/codec.go b/tests/br_key_locked/codec.go index cd02c35d7..40911ddab 100644 --- a/tests/br_key_locked/codec.go +++ b/tests/br_key_locked/codec.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" "github.com/pingcap/tidb/util/codec" ) diff --git a/tests/br_key_locked/locker.go b/tests/br_key_locked/locker.go index bcac3efd8..36019220d 100644 --- a/tests/br_key_locked/locker.go +++ b/tests/br_key_locked/locker.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v3/client" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/store/tikv/tikvrpc" diff --git a/tests/br_move_backup/run.sh b/tests/br_move_backup/run.sh index 43f27a9af..b85d25823 100755 --- a/tests/br_move_backup/run.sh +++ b/tests/br_move_backup/run.sh @@ -32,6 +32,15 @@ run_sql "DROP TABLE $DB.$TABLE;" # change backup path mv $TEST_DIR/$DB $TEST_DIR/another$DB +# restore table with old path +echo "restore with old path start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR || restore_old_fail=1 + +if [ "$restore_old_fail" -ne "1" ];then + echo "TEST: [$TEST_NAME] test restore with old path failed!" + exit 1 +fi + # restore table echo "restore start..." run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/another$DB" --pd $PD_ADDR diff --git a/tests/br_rawkv/client.go b/tests/br_rawkv/client.go new file mode 100644 index 000000000..bd13839f6 --- /dev/null +++ b/tests/br_rawkv/client.go @@ -0,0 +1,325 @@ +package main + +import ( + "bytes" + "encoding/hex" + "flag" + "fmt" + "hash/crc64" + "math/rand" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/store/tikv" + "github.com/prometheus/common/log" +) + +var ( + pdAddr = flag.String("pd", "127.0.0.1:2379", "Address of PD") + runMode = flag.String("mode", "", "Mode. 
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"hash/crc64"
+	"math/rand"
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/config"
+	"github.com/pingcap/tidb/store/tikv"
+	"github.com/prometheus/common/log"
+)
+
+var (
+	pdAddr      = flag.String("pd", "127.0.0.1:2379", "Address of PD")
+	runMode     = flag.String("mode", "", "Mode. One of 'rand-gen', 'checksum', 'scan' and 'delete'")
+	startKeyStr = flag.String("start-key", "", "Start key in hex")
+	endKeyStr   = flag.String("end-key", "", "End key in hex")
+	keyMaxLen   = flag.Int("key-max-len", 32, "Max length of keys for rand-gen mode")
+	concurrency = flag.Int("concurrency", 32, "Concurrency to run rand-gen")
+	duration    = flag.Int("duration", 10, "duration(second) of rand-gen")
+)
+
+func createClient(addr string) (*tikv.RawKVClient, error) {
+	cli, err := tikv.NewRawKVClient([]string{addr}, config.Security{})
+	return cli, err
+}
+
+func main() {
+	flag.Parse()
+
+	startKey, err := hex.DecodeString(*startKeyStr)
+	if err != nil {
+		log.Fatalf("Invalid startKey: %v, err: %+v", *startKeyStr, err)
+	}
+	endKey, err := hex.DecodeString(*endKeyStr)
+	if err != nil {
+		log.Fatalf("Invalid endKey: %v, err: %+v", *endKeyStr, err)
+	}
+	if len(endKey) == 0 {
+		log.Fatal("Empty endKey is not supported yet")
+	}
+
+	if *runMode == "test-rand-key" {
+		testRandKey(startKey, endKey, *keyMaxLen)
+		return
+	}
+
+	client, err := createClient(*pdAddr)
+	if err != nil {
+		log.Fatalf("Failed to create client to %v, err: %+v", *pdAddr, err)
+	}
+
+	switch *runMode {
+	case "rand-gen":
+		err = randGenWithDuration(client, startKey, endKey, *keyMaxLen, *concurrency, *duration)
+	case "checksum":
+		err = checksum(client, startKey, endKey)
+	case "scan":
+		err = scan(client, startKey, endKey)
+	case "delete":
+		err = deleteRange(client, startKey, endKey)
+	}
+
+	if err != nil {
+		log.Fatalf("Error: %+v", err)
+	}
+}
+
+func randGenWithDuration(client *tikv.RawKVClient, startKey, endKey []byte,
+	maxLen int, concurrency int, duration int) error {
+	var err error
+	ok := make(chan struct{})
+	go func() {
+		err = randGen(client, startKey, endKey, maxLen, concurrency)
+		ok <- struct{}{}
+	}()
+	select {
+	case <-time.After(time.Second * time.Duration(duration)):
+	case <-ok:
+	}
+	return err
+}
+
+func randGen(client *tikv.RawKVClient, startKey, endKey []byte, maxLen int, concurrency int) error {
+	log.Infof("Start rand-gen from %v to %v, maxLen %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey), maxLen)
+	log.Infof("Rand-gen will keep running. Press Ctrl+C to stop it manually.")
+
+	// Cannot generate shorter key than commonPrefix
+	commonPrefixLen := 0
+	for ; commonPrefixLen < len(startKey) && commonPrefixLen < len(endKey) &&
+		startKey[commonPrefixLen] == endKey[commonPrefixLen]; commonPrefixLen++ {
+		continue
+	}
+
+	if maxLen < commonPrefixLen {
+		return errors.Errorf("maxLen (%v) < commonPrefixLen (%v)", maxLen, commonPrefixLen)
+	}
+
+	const batchSize = 32
+
+	errCh := make(chan error, concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			for {
+				keys := make([][]byte, 0, batchSize)
+				values := make([][]byte, 0, batchSize)
+
+				for i := 0; i < batchSize; i++ {
+					key := randKey(startKey, endKey, maxLen)
+					keys = append(keys, key)
+					value := randValue()
+					values = append(values, value)
+				}
+
+				err := client.BatchPut(keys, values)
+				if err != nil {
+					errCh <- errors.Trace(err)
+				}
+			}
+		}()
+	}
+
+	err := <-errCh
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+func testRandKey(startKey, endKey []byte, maxLen int) {
+	for {
+		k := randKey(startKey, endKey, maxLen)
+		if bytes.Compare(k, startKey) < 0 || bytes.Compare(k, endKey) >= 0 {
+			panic(hex.EncodeToString(k))
+		}
+	}
+}
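+// randKey draws a random key in [startKey, endKey): bytes are generated left
+// to right within the bounds implied by the two endpoints, and a side becomes
+// unbounded once a byte falls strictly inside its bound. Keys may terminate
+// early (so shorter keys are possible), and generation restarts whenever the
+// candidate could only equal or extend endKey. testRandKey above asserts the
+// resulting invariant.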
+func randKey(startKey, endKey []byte, maxLen int) []byte {
+Retry:
+	for { // Regenerate on fail
+		result := make([]byte, 0, maxLen)
+
+		upperUnbounded := false
+		lowerUnbounded := false
+
+		for i := 0; i < maxLen; i++ {
+			upperBound := 256
+			if !upperUnbounded {
+				if i >= len(endKey) {
+					// The generated key is the same as endKey which is invalid. Regenerate it.
+					continue Retry
+				}
+				upperBound = int(endKey[i]) + 1
+			}
+
+			lowerBound := 0
+			if !lowerUnbounded {
+				if i >= len(startKey) {
+					lowerUnbounded = true
+				} else {
+					lowerBound = int(startKey[i])
+				}
+			}
+
+			if lowerUnbounded {
+				if rand.Intn(257) == 0 {
+					return result
+				}
+			}
+
+			value := rand.Intn(upperBound - lowerBound)
+			value += lowerBound
+
+			if value < upperBound-1 {
+				upperUnbounded = true
+			}
+			if value > lowerBound {
+				lowerUnbounded = true
+			}
+
+			result = append(result, uint8(value))
+		}
+
+		return result
+	}
+}
+
+func randValue() []byte {
+	result := make([]byte, 0, 512)
+	for i := 0; i < 512; i++ {
+		value := rand.Intn(257)
+		if value == 256 {
+			if i > 0 {
+				return result
+			}
+			value--
+		}
+		result = append(result, uint8(value))
+	}
+	return result
+}
+
+func checksum(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start checksum on range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+	digest := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	var res uint64
+
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		_, _ = digest.Write(k)
+		_, _ = digest.Write(v)
+		res ^= digest.Sum64()
+	}
+
+	fmt.Printf("Checksum result: %016x\n", res)
+	return nil
+}
+
+func deleteRange(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start delete data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+	return client.DeleteRange(startKey, endKey)
+}
+
+func scan(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start scanning data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+
+	var key []byte
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		fmt.Printf("key: %v, value: %v\n", hex.EncodeToString(k), hex.EncodeToString(v))
+		if bytes.Compare(key, k) >= 0 {
+			log.Errorf("Scan result is not in order. "+
+				"Previous key: %v, Current key: %v",
+				hex.EncodeToString(key), hex.EncodeToString(k))
+		}
+		key = k
+	}
+
+	log.Infof("Finished scanning.")
+	return nil
+}
+
+const defaultScanBatchSize = 128
+
+type rawKVScanner struct {
+	client    *tikv.RawKVClient
+	batchSize int
+
+	currentKey []byte
+	endKey     []byte
+
+	bufferKeys   [][]byte
+	bufferValues [][]byte
+	bufferCursor int
+	noMore       bool
+}
+
+func newRawKVScanner(client *tikv.RawKVClient, startKey, endKey []byte) *rawKVScanner {
+	return &rawKVScanner{
+		client:    client,
+		batchSize: defaultScanBatchSize,
+
+		currentKey: startKey,
+		endKey:     endKey,
+
+		noMore: false,
+	}
+}
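+// Next returns the following key/value pair, refilling the internal buffer
+// one client.Scan batch at a time. Each refill resumes from the last buffered
+// key plus a trailing zero byte (the smallest key strictly greater than it);
+// a batch shorter than batchSize marks the end of the range, and exhaustion
+// is reported as a nil key with a nil error.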
%v\n", hex.EncodeToString(k), hex.EncodeToString(v)) + if bytes.Compare(key, k) >= 0 { + log.Errorf("Scan result is not in order. "+ + "Previous key: %v, Current key: %v", + hex.EncodeToString(key), hex.EncodeToString(k)) + } + } + + log.Infof("Finished Scanning.") + return nil +} + +const defaultScanBatchSize = 128 + +type rawKVScanner struct { + client *tikv.RawKVClient + batchSize int + + currentKey []byte + endKey []byte + + bufferKeys [][]byte + bufferValues [][]byte + bufferCursor int + noMore bool +} + +func newRawKVScanner(client *tikv.RawKVClient, startKey, endKey []byte) *rawKVScanner { + return &rawKVScanner{ + client: client, + batchSize: defaultScanBatchSize, + + currentKey: startKey, + endKey: endKey, + + noMore: false, + } +} + +func (s *rawKVScanner) Next() ([]byte, []byte, error) { + if s.bufferCursor >= len(s.bufferKeys) { + if s.noMore { + return nil, nil, nil + } + + s.bufferCursor = 0 + + batchSize := s.batchSize + var err error + s.bufferKeys, s.bufferValues, err = s.client.Scan(s.currentKey, s.endKey, batchSize) + if err != nil { + return nil, nil, errors.Trace(err) + } + + if len(s.bufferKeys) < batchSize { + s.noMore = true + } + + if len(s.bufferKeys) == 0 { + return nil, nil, nil + } + + bufferKey := s.bufferKeys[len(s.bufferKeys)-1] + bufferKey = append(bufferKey, 0) + s.currentKey = bufferKey + } + + key := s.bufferKeys[s.bufferCursor] + value := s.bufferValues[s.bufferCursor] + s.bufferCursor++ + return key, value, nil +} diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh new file mode 100644 index 000000000..f57e76827 --- /dev/null +++ b/tests/br_rawkv/run.sh @@ -0,0 +1,85 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu + +BACKUP_DIR="raw_backup" + +checksum() { + bin/rawkv --pd $PD_ADDR --mode checksum --start-key $1 --end-key $2 | grep result | awk '{print $3}' +} + +fail_and_exit() { + echo "TEST: [$TEST_NAME] failed!" + exit 1 +} + +checksum_empty=$(checksum 31 3130303030303030) + +# generate raw kv randomly in range[start-key, end-key) in 10s +bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10 + +checksum_ori=$(checksum 31 3130303030303030) +checksum_partial=$(checksum 311111 311122) + +# backup rawkv +echo "backup start..." +run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4 + +# delete data in range[start-key, end-key) +bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030 + +# Ensure the data is deleted +checksum_new=$(checksum 31 3130303030303030) + +if [ "$checksum_new" != "$checksum_empty" ];then + echo "failed to delete data in range" + fail_and_exit +fi + +# restore rawkv +echo "restore start..." 
diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh
new file mode 100644
index 000000000..f57e76827
--- /dev/null
+++ b/tests/br_rawkv/run.sh
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+BACKUP_DIR="raw_backup"
+
+checksum() {
+    bin/rawkv --pd $PD_ADDR --mode checksum --start-key $1 --end-key $2 | grep result | awk '{print $3}'
+}
+
+fail_and_exit() {
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+}
+
+checksum_empty=$(checksum 31 3130303030303030)
+
+# generate raw kv randomly in range[start-key, end-key) in 10s
+bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10
+
+checksum_ori=$(checksum 31 3130303030303030)
+checksum_partial=$(checksum 311111 311122)
+
+# backup rawkv
+echo "backup start..."
+run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+# delete data in range[start-key, end-key)
+bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
+
+# Ensure the data is deleted
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_empty" ];then
+    echo "failed to delete data in range"
+    fail_and_exit
+fi
+
+# restore rawkv
+echo "restore start..."
+run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_ori" ];then
+    echo "checksum failed after restore"
+    fail_and_exit
+fi
+
+# delete data in range[start-key, end-key)
+bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
+
+# Ensure the data is deleted
+checksum_new=$(checksum 31 3130303030303030)
+
+if [ "$checksum_new" != "$checksum_empty" ];then
+    echo "failed to delete data in range"
+    fail_and_exit
+fi
+
+# FIXME: restore rawkv partially after changing endkey to inclusive
+# echo "restore start..."
+# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 311111 --end 311122 --format hex --concurrency 4
+#
+# checksum_new=$(checksum 31 3130303030303030)
+#
+# if [ "$checksum_new" != "$checksum_partial" ];then
+#     echo "checksum failed after restore"
+#     fail_and_exit
+# fi
+
+echo "TEST: [$TEST_NAME] succeeded!"
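br_s3 below runs BR against a local MinIO server instead of real S3. Reduced to its moving parts (the port, bucket, and credentials are the test's own values):

```sh
export MINIO_ACCESS_KEY=brs3accesskey MINIO_SECRET_KEY=brs3secretkey
bin/minio server --address 127.0.0.1:24927 "$TEST_DIR/data" &   # throwaway data dir
# create the target bucket, then point BR at the endpoint
s3cmd --access_key=$MINIO_ACCESS_KEY --secret_key=$MINIO_SECRET_KEY \
      --host=127.0.0.1:24927 --host-bucket=127.0.0.1:24927 --no-ssl mb s3://mybucket
bin/br backup full --pd 127.0.0.1:2379 -s "s3://mybucket/full" \
      --s3.endpoint="http://127.0.0.1:24927"
```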
+run_br restore full -s "s3://mybucket/$DB" --pd $PD_ADDR --s3.endpoint="http://$S3_ENDPOINT" + +for i in $(seq $DB_COUNT); do + row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}') +done + +fail=false +for i in $(seq $DB_COUNT); do + if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then + fail=true + echo "TEST: [$TEST_NAME] fail on database $DB${i}" + fi + echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" +done + +if $fail; then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +else + echo "TEST: [$TEST_NAME] successed!" +fi + +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB${i};" +done diff --git a/tests/br_s3/workload b/tests/br_s3/workload new file mode 100644 index 000000000..19336335e --- /dev/null +++ b/tests/br_s3/workload @@ -0,0 +1,12 @@ +recordcount=5000 +operationcount=0 +workload=core + +readallfields=true + +readproportion=0 +updateproportion=0 +scanproportion=0 +insertproportion=0 + +requestdistribution=uniform diff --git a/tests/br_table_partition/run.sh b/tests/br_table_partition/run.sh index fe0ce874b..ce7fe1df1 100755 --- a/tests/br_table_partition/run.sh +++ b/tests/br_table_partition/run.sh @@ -30,25 +30,23 @@ done echo "backup start..." run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 -for i in $(seq $DB_COUNT); do - run_sql "DROP DATABASE $DB${i};" -done +run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE${i};" | awk '/COUNT/{print $2}') done fail=false -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then fail=true - echo "TEST: [$TEST_NAME] fail on database $DB${i}" + echo "TEST: [$TEST_NAME] fail on table $DB.$TABLE${i}" fi - echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" + echo "table $DB.$TABLE${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" done if $fail; then diff --git a/tests/br_tls/certificates/ca.pem b/tests/br_tls/certificates/ca.pem new file mode 100644 index 000000000..49098d653 --- /dev/null +++ b/tests/br_tls/certificates/ca.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgDCCAmigAwIBAgIUHWvlRJydvYTR0ot3b8f6IlSHcGUwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwNzQxMDBaGA8yMTIwMDEyNTA3NDEwMFowVzELMAkGA1UEBhMCQ04xEDAO +BgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdD +QVAxEjAQBgNVBAMTCU15IG93biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOAdNtjanFhPaKJHQjr7+h/Cpps5bLc6S1vmgi/EIi9PKv3eyDgtlW1r +As2sjXRMHjcuZp2hHJ9r9FrMQD1rQQq5vJzQqM+eyWLc2tyZWXNWkZVvpjU4Hy5k +jZFLXoyHgAvps/LGu81F5Lk5CvLHswWTyGQUCFi1l/cYcQg6AExh2pO/WJu4hQhe +1mBBIKsJhZ5b5tWruLeI+YIjD1oo1ADMHYLK1BHON2fUmUHRGbrYKu4yCuyip3wn +rbVlpabn7l1JBMatCUJLHR6VWQ2MNjrOXAEUYm4xGEN+tUYyUOGl5mHFguLl3OIn +wj+1dT3WOr/NraPYlwVOnAd9GNbPJj0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ0CEqxLwEpI6J2gYJRg15oWZrj/ +MA0GCSqGSIb3DQEBCwUAA4IBAQCf8xRf7q1xAaGrc9HCPvN4OFkxDwz1CifrvrLR +ZgIWGUdCHDW2D1IiWKZQWeJKC1otA5x0hrS5kEGfkLFhADEU4txwp70DQaBArPti +pSgheIEbaT0H3BUTYSgS3VL2HjxN5OVMN6jNG3rWyxnJpNOCsJhhJXPK50CRZ7fk 
diff --git a/tests/br_tls/certificates/ca.pem b/tests/br_tls/certificates/ca.pem
new file mode 100644
index 000000000..49098d653
--- /dev/null
+++ b/tests/br_tls/certificates/ca.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDgDCCAmigAwIBAgIUHWvlRJydvYTR0ot3b8f6IlSHcGUwDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl
+aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y
+MDAyMTgwNzQxMDBaGA8yMTIwMDEyNTA3NDEwMFowVzELMAkGA1UEBhMCQ04xEDAO
+BgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdD
+QVAxEjAQBgNVBAMTCU15IG93biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOAdNtjanFhPaKJHQjr7+h/Cpps5bLc6S1vmgi/EIi9PKv3eyDgtlW1r
+As2sjXRMHjcuZp2hHJ9r9FrMQD1rQQq5vJzQqM+eyWLc2tyZWXNWkZVvpjU4Hy5k
+jZFLXoyHgAvps/LGu81F5Lk5CvLHswWTyGQUCFi1l/cYcQg6AExh2pO/WJu4hQhe
+1mBBIKsJhZ5b5tWruLeI+YIjD1oo1ADMHYLK1BHON2fUmUHRGbrYKu4yCuyip3wn
+rbVlpabn7l1JBMatCUJLHR6VWQ2MNjrOXAEUYm4xGEN+tUYyUOGl5mHFguLl3OIn
+wj+1dT3WOr/NraPYlwVOnAd9GNbPJj0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
+MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ0CEqxLwEpI6J2gYJRg15oWZrj/
+MA0GCSqGSIb3DQEBCwUAA4IBAQCf8xRf7q1xAaGrc9HCPvN4OFkxDwz1CifrvrLR
+ZgIWGUdCHDW2D1IiWKZQWeJKC1otA5x0hrS5kEGfkLFhADEU4txwp70DQaBArPti
+pSgheIEbaT0H3BUTYSgS3VL2HjxN5OVMN6jNG3rWyxnJpNOCsJhhJXPK50CRZ7fk
+Dcodj6FfEM2bfp2bGkxyVtUch7eepfUVbslXa7jE7Y8M3cr9NoLUcSP6D1RJWkNd
+dBQoUsb6Ckq27ozEKOgwuBVv4BrrbFN//+7WHP8Vy6sSMyd+dJLBi6wehJjQhIz6
+vqLWE81rSJuxZqjLpCkFdeEF+9SRjWegU0ZDM4V+YeX53BPC
+-----END CERTIFICATE-----
diff --git a/tests/br_tls/certificates/client-key.pem b/tests/br_tls/certificates/client-key.pem
new file mode 100644
index 000000000..43b021796
--- /dev/null
+++ b/tests/br_tls/certificates/client-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA06qb7HABWHrU4CvBUO/2hXGgobi/UlTqTrYGZoJqSrvhKCP6
+HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt8PuZ+Ef3jcJLuB1e+Kms0s5tiTng
+6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQqpOuIgwi+7WX/bIgIu9wooCvJEGq
+hScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFfhwKMDqga4pRwJStLTDiMrtUz+OKc
+rMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40NnvedwaE+Ii7EnmcSDF9PaCVrXSK9F/
+KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudVXwIDAQABAoIBAHAzW/v1U4FHe1hp
+WUxCJ3eNSAzyFdja0mlu6+2i7B05gpz4lTiFz5RuQXzx5lM43a6iRpqYgsbbed+T
+X5RIw5iehnuqCnvGpsSuLQ27Q7VrX30ChUrQ37LVFSC7Usak0B9IoIFYun0WBLV9
+p+KYJqKFLiU2McUj+bGtnoNmUVqRzXQosoQue/pS9OknZ3NU7FxiyI3o4ME8rDvv
+9x4vc1zcqbGXTQB224kOT0xoYt8RTmIbHvkR6/yszAtHDBcdzhINVuf38lv9FvaN
+FxymwsY4IKPumQZlOEzHvSnpHTrwBMFdXjqpX1VxQb3yznEK+01MHf/tYsiU57IS
+WVQMTeECgYEA7Fk0w66LGgdeeWrNTSTBCBPTofDVmR7Tro6k++5XTRt552ZoVz2l
+8Lfi/Px5hIyve9gnM7slWrQ72JIQ5xVYZHtic3iwVFuRAD/QVfWU/SNsRsSB/93M
+3BEumwJA6vN/qvkZueos3LOxN8kExk6GD0wIl6HjTeJPbbPHqmk6Pr0CgYEA5UQI
+skaj8QGpjG8Hc10FeJpYsZiZA7gJaNu4RPqBj+1RHu/eYrL2mouooZdJfIJTmlTz
+4NJcfb+Dl6qwbHUQ3mddhauFu1/YRwmaR5QKjwaBdeZbly9ljsRISFpjtosc7IBA
+/Bl83xtbCboMdm7cH49X2CgRQ1uVFWraye0MBEsCgYEA43vtHFdYjaIAHa9dkV3J
+6aNjtF/gxzNznXSwecfrAU1r5PydezLcEDh94vCDacAbe4EOIm2Dw6zsWUQlvrW9
+0WEs3mWQmnFTvECvnrz0PT2mDus/EO4EKuDi0dG2eC4MeJywVVB/A6J09XOnA9Q6
+lmihcIkiBinIN5etm2kS5aUCgYBCdcRnmZ6woKC7uvvX72FEosmPQgMpVtIzeW4j
+YNLqHAtmAnbe+a4PAukxXp/I3ibKGFJSG+j/8uJ8tthJuG3ZavFrbFtqA9C4VwpI
+MZwV9fbVbJ+kZfL0veWOQ9Wf9xe9Xzh3XBQcwNtVKH+wXVamN3FpkcPfWM8Q1Fb0
+LilLnQKBgQCq7+YlSnQX0rbmPTXVVb+B12rbqLDnqA0EuaVGrdu9zPPT04e5fpHU
+SD33ibaEyeOF+zLg8T53whDbLJ0tURhUk+BlLTjdd99NXtyGMlfDnIsCnAeJhY8f
+Iki6LYbbP2uWV4/5IDy9XW7J42Pfl9QyEVXq+PfTyPPjXC9/J4GOuw==
+-----END RSA PRIVATE KEY-----
diff --git a/tests/br_tls/certificates/client.pem b/tests/br_tls/certificates/client.pem
new file mode 100644
index 000000000..7dace2f9d
--- /dev/null
+++ b/tests/br_tls/certificates/client.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIUaupI14PPUSshx7FmD7lsVPFahwAwDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl
+aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y
+MDAyMTgwNzQ4MDBaGA8yMTIwMDEyNTA3NDgwMFowETEPMA0GA1UEAxMGY2xpZW50
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA06qb7HABWHrU4CvBUO/2
+hXGgobi/UlTqTrYGZoJqSrvhKCP6HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt
+8PuZ+Ef3jcJLuB1e+Kms0s5tiTng6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQ
+qpOuIgwi+7WX/bIgIu9wooCvJEGqhScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFf
+hwKMDqga4pRwJStLTDiMrtUz+OKcrMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40Nnv
+edwaE+Ii7EnmcSDF9PaCVrXSK9F/KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudV
+XwIDAQABo4GDMIGAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcD
+AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRqlq/slflqw/cdlE+xNcnmmxZwlTAf
+BgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zALBgNVHREEBDACggAwDQYJ
+KoZIhvcNAQELBQADggEBAMGC48O77wZHZRRxXIpTQDMUSpGTKks76l+s1N7sMLrG
+DCQi/XFVfV8e/Z1qs224IyU1IGXXcdwK0Zfa9owUmVmiHE8lznv7m9m7j4BGOshc
+pvnJaeuUluKR/QHzwpMsUKudoEyRjn09e0Jl0/IcsKh13rzgd458XR0ShCjxybo4
+nQ1aZb1wOPLG6tpUYsV+x2Coc6TgnJWJYlDbRfpIuj6y16T1kKuWzpm6VU3kbiJ9
+/nzDgauuJHIlXEWL9dBZcpzUibFswIQyGsK7c4AJrtY1OGx0/2nZIIjtGY3gtWyX
+XGV9c4kM695gl5rJndB4IPl5GQeJBCNyIaVybh7Va70=
+-----END CERTIFICATE-----
diff --git a/tests/br_tls/certificates/server-key.pem b/tests/br_tls/certificates/server-key.pem
new file mode 100644
index 000000000..2779d6ec6
--- /dev/null
+++ b/tests/br_tls/certificates/server-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAq9mcQG/nSLREM2r7s2tCKCE/1KJQvV0xmkIglFD2VDDfYW+C
+mBME5LNWbYR6L0yCVHU8B7aVnw1FsbiF4TpUY3w/r4mOGl7QbGivMYvRe6Nh2xUO
+TvctwFyv2FvrtBX1rZ5/8QLbz1IFHOtTV7QUzLzWq3fSAiF1vhVsS3BUmh6QvWU8
+q9dylpmUQ22otSRXmirwEzFt9K+w3VK9Z6aac7e2XRurVPxbqgQUq2bblUhii8Fc
+dCUA8NenlWp+H64mN2TzVaGb5Csr7SNS7AWDEPKfoo7W3H7bzKlmRVcPeRdftwti
+SI1jfboxprya/nbTyBPE/yfLU/SYn/b89epziwIDAQABAoIBACPlI08OULgN90Tq
+LsLuP3ZUY5nNgaHcKnU3JMj2FE3Hm5ElkpijOF1w3Dep+T+R8pMjnbNavuvnAMy7
+ZzOBVIknNcI7sDPv5AcQ4q8trkbt/I2fW0rBNIw+j/hYUuZdw+BNABpeZ31pe2nr
++Y+TLNkLBKfyMiqBxK88mE81mmZKblyvXCawW0A/iDDJ7fPNqoGF+y9ylTYaNRPk
+aJGnaEZobJ4Lm5tSqW4gRX2ft6Hm67RkvVaopPFnlkvfusXUTFUqEVQCURRUqXbf
+1ah2chUHxj22UdY9540H5yVNgEP3oR+uS/hbZqxKcJUTznUW5th3CyQPIKMlGlcB
+p+zWlTECgYEAxlY4zGJw4QQwGYMKLyWCSHUgKYrKu2Ub2JKJFMTdsoj9H7DI+WHf
+lQaO9NCOo2lt0ofYM1MzEpI5Cl/aMrPw+mwquBbxWdMHXK2eSsUQOVo9HtUjgK2t
+J2AYFCfsYndo+hCj3ApMHgiY3sghTCXeycvT52bm11VeNVcs3pKxIYMCgYEA3dAJ
+PwIfAB8t+6JCP2yYH4ExNjoMNYMdXqhz4vt3UGwgskRqTW6qdd9JvrRQ/JPvGpDy
+T375h/+lLw0E4ljsnOPGSzbXNf4bYRHTwPOL+LqVM4Bg90hjclqphElHChxep1di
+WcdArB0oae/l4M96z3GjfnXIUVOp8K6BUQCab1kCgYAFFAQUR5j4SfEpVg+WsXEq
+hcUzCxixv5785pOX8opynctNWmtq5zSgTjCu2AAu8u4a69t/ROwT16aaO2YM0kqj
+Ps3BNOUtFZgkqVVaOL13mnXiKjbkfo3majFzoqoMw13uuSpY4fKc+j9fxOQFXRrd
+M9jTHfFfJhJpbzf44uyiHQKBgFIPwzvyVvG+l05/Ky83x9fv/frn4thxV45LmAQj
+sHKqbjZFpWZcSOgu4aOSJlwrhsw3T84lVcAAzmXn1STAbVll01jEQz6QciSpacP6
+1pAAx240UqtptpD6BbkROxz8ffA/Hf3E/6Itb2QyAsP3PqI8kpYYkTG1WCvZA7Kq
+HHiRAoGAXbUZ25LcrmyuxKWpbty8fck1tjKPvclQB35rOx6vgnfW6pcKMeebYvgq
+nJka/QunEReOH/kGxAd/+ymvUBuFQCfFg3Aus+DtAuh9AkBr+cIyPjJqynnIT87J
+MbkOw4uEhDJAtGUR9o1j83N1f05bnEwssXiXR0LZPylb9Qzc4tg=
+-----END RSA PRIVATE KEY-----
diff --git a/tests/br_tls/certificates/server.pem b/tests/br_tls/certificates/server.pem
new file mode 100644
index 000000000..ea5ef2d5f
--- /dev/null
+++ b/tests/br_tls/certificates/server.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDjzCCAnegAwIBAgIUWBTDQm4xOYDxZBTkpCQouREtT8QwDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl
+aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y
+MDAyMTgwOTExMDBaGA8yMTIwMDEyNTA5MTEwMFowFjEUMBIGA1UEAxMLdGlkYi1z
+ZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCr2ZxAb+dItEQz
+avuza0IoIT/UolC9XTGaQiCUUPZUMN9hb4KYEwTks1ZthHovTIJUdTwHtpWfDUWx
+uIXhOlRjfD+viY4aXtBsaK8xi9F7o2HbFQ5O9y3AXK/YW+u0FfWtnn/xAtvPUgUc
+61NXtBTMvNard9ICIXW+FWxLcFSaHpC9ZTyr13KWmZRDbai1JFeaKvATMW30r7Dd
+Ur1npppzt7ZdG6tU/FuqBBSrZtuVSGKLwVx0JQDw16eVan4friY3ZPNVoZvkKyvt
+I1LsBYMQ8p+ijtbcftvMqWZFVw95F1+3C2JIjWN9ujGmvJr+dtPIE8T/J8tT9Jif
+9vz16nOLAgMBAAGjgZEwgY4wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG
+AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRVB/Bvdzvh
+6WQRWpc9SzcbXLz77zAfBgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zAP
+BgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAAqg5pgGQqORKRSdlY
+wzVvzKaulpvjZfVMM6YiOUtmlU0CGWq7E3gLFzkvebpU0KsFlbyZ92h/2Fw5Ay2b
+kxkCy18mJ4lGkvF0cU4UD3XheFMvD2QWWRX4WPpAhStofrWOXeyq3Div2+fQjMJd
+kyeWUzPU7T467IWUHOWNsFAjfVHNsmG45qLGt+XQckHTvASX5IvN+5tkRUCW30vO
+b3BdDQUFglGTUFU2epaZGTti0SYiRiY+9R3zFWX4uBcEBYhk9e/0BU8FqdWW5GjI
+pFpH9t64CjKIdRQXpIn4cogK/GwyuRuDPV/RkMjrIqOi7pGejXwyDe9avHFVR6re
+oowA
+-----END CERTIFICATE-----
diff --git a/tests/br_tls/config/pd.toml b/tests/br_tls/config/pd.toml
new file mode 100644
index 000000000..69cb94b6f
--- /dev/null
+++ b/tests/br_tls/config/pd.toml
@@ -0,0 +1,9 @@
+# config of pd
+
+[security]
+# Path of file that contains list of trusted SSL CAs. If set, the following cert and key paths must not be empty.
+cacert-path = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format.
+cert-path = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format.
+key-path = "tests/br_tls/certificates/server-key.pem"
diff --git a/tests/br_tls/config/tidb.toml b/tests/br_tls/config/tidb.toml
new file mode 100644
index 000000000..48a783332
--- /dev/null
+++ b/tests/br_tls/config/tidb.toml
@@ -0,0 +1,14 @@
+# config of tidb
+
+# Schema lease duration
+# There are a lot of DDLs in the tests; set this
+# to 360s to check that BR shuts down gracefully.
+lease = "360s"
+
+[security]
+# Path of file that contains list of trusted SSL CAs for connection with cluster components.
+cluster-ssl-ca = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format for connection with cluster components.
+cluster-ssl-cert = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format for connection with cluster components.
+cluster-ssl-key = "tests/br_tls/certificates/server-key.pem"
\ No newline at end of file
diff --git a/tests/br_tls/config/tikv.toml b/tests/br_tls/config/tikv.toml
new file mode 100644
index 000000000..b4859a731
--- /dev/null
+++ b/tests/br_tls/config/tikv.toml
@@ -0,0 +1,19 @@
+# config of tikv
+
+[coprocessor]
+region-max-keys = 20
+region-split-keys = 12
+
+[rocksdb]
+max-open-files = 4096
+[raftdb]
+max-open-files = 4096
+[raftstore]
+# true (default value) for high reliability, this can prevent data loss in case of power failure.
+sync-log = false
+
+[security]
+# Set the path for certificates. An empty string disables secure connections.
+ca-path = "tests/br_tls/certificates/ca.pem"
+cert-path = "tests/br_tls/certificates/server.pem"
+key-path = "tests/br_tls/certificates/server-key.pem"
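The three fragments above point PD, TiDB, and TiKV at the same CA and server key pair; note each component uses its own key names for the same material. Once the cluster is restarted with them (as run.sh below does), a quick liveness probe needs the same material too; a sketch, assuming PD enforces mutual TLS when a CA is configured:

```sh
# PD now only answers TLS requests signed by the test CA
curl --cacert tests/br_tls/certificates/ca.pem \
     --cert tests/br_tls/certificates/client.pem \
     --key tests/br_tls/certificates/client-key.pem \
     https://127.0.0.1:2379/pd/api/v1/version
```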
diff --git a/tests/br_tls/run.sh b/tests/br_tls/run.sh
new file mode 100755
index 000000000..9c494b700
--- /dev/null
+++ b/tests/br_tls/run.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source $cur/../_utils/run_services
+
+DB="$TEST_NAME"
+TABLE="usertable1"
+TABLE2="usertable2"
+
+echo "Restart cluster with tls"
+start_services_withTLS "$cur"
+
+run_sql "DROP DATABASE IF EXISTS $DB;"
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.$TABLE( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.$TABLE2( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+run_sql "DROP DATABASE $DB;"
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
+
+echo "Restart services without tls"
+start_services
diff --git a/tests/br_z_gc_safepoint/gc.go b/tests/br_z_gc_safepoint/gc.go
index a18367259..7bc223aac 100644
--- a/tests/br_z_gc_safepoint/gc.go
+++ b/tests/br_z_gc_safepoint/gc.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/pingcap/log"
-	pd "github.com/pingcap/pd/client"
+	pd "github.com/pingcap/pd/v3/client"
 	"github.com/pingcap/tidb/store/tikv/oracle"
 	"go.uber.org/zap"
 )
diff --git a/tests/br_z_gc_safepoint/run.sh b/tests/br_z_gc_safepoint/run.sh
index 916ca1fa8..a76e97501 100755
--- a/tests/br_z_gc_safepoint/run.sh
+++ b/tests/br_z_gc_safepoint/run.sh
@@ -23,6 +23,8 @@ set -eu
 DB="$TEST_NAME"
 TABLE="usertable"
 
+MAX_UINT64=18446744073709551615
+
 run_sql "CREATE DATABASE $DB;"
 
 go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB
@@ -39,7 +41,25 @@ echo "backup start (expect fail)..."
 run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
 
 if [ "$backup_gc_fail" -ne "1" ];then
-    echo "TEST: [$TEST_NAME] failed!"
+    echo "TEST: [$TEST_NAME] test check backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts 1 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check last backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup with max_uint64 start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts $MAX_UINT64 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check max backup ts failed!"
     exit 1
 fi
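The three expected failures above are all safepoint/TSO bounds checks: the backup TS must stay ahead of the GC safepoint, and `--lastbackupts` must lie between the safepoint (1 does not) and the current TSO (MAX_UINT64 does not). A TiDB TSO packs a physical millisecond clock above an 18-bit logical counter, so any timestamp can be sanity-checked in plain shell (the value below is illustrative, not from a real run):

```sh
ts=412599405528875011                 # example TSO
physical_ms=$((ts >> 18))             # strip the 18-bit logical counter
date -u -d @$((physical_ms / 1000))   # GNU date: wall-clock time of that TSO
```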
diff --git a/tests/config/restore-tikv.toml b/tests/config/restore-tikv.toml
new file mode 100644
index 000000000..010711cd4
--- /dev/null
+++ b/tests/config/restore-tikv.toml
@@ -0,0 +1,17 @@
+# config of tikv
+
+[server]
+labels = { exclusive = "restore" }
+
+[coprocessor]
+region-max-keys = 20
+region-split-keys = 12
+
+[rocksdb]
+max-open-files = 4096
+[raftdb]
+max-open-files = 4096
+[raftstore]
+# true (default value) for high reliability, this can prevent data loss in case of power failure.
+sync-log = false
+capacity = "10GB"
\ No newline at end of file
diff --git a/tests/config/tikv.toml b/tests/config/tikv.toml
index e93a16597..73323d878 100644
--- a/tests/config/tikv.toml
+++ b/tests/config/tikv.toml
@@ -11,3 +11,4 @@ max-open-files = 4096
 [raftstore]
 # true (default value) for high reliability, this can prevent data loss when power failure.
 sync-log = false
+capacity = "10GB"
\ No newline at end of file
diff --git a/tests/download_tools.sh b/tests/download_tools.sh
new file mode 100755
index 000000000..e0689dd61
--- /dev/null
+++ b/tests/download_tools.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Download tools for running the integration test
+
+set -eu
+
+BIN="$(dirname "$0")/../bin"
+
+if [ "$(uname -s)" != Linux ]; then
+    echo 'Can only automatically download binaries on Linux.'
+    exit 1
+fi
+
+MISSING_TIDB_COMPONENTS=
+for COMPONENT in tidb-server pd-server tikv-server pd-ctl; do
+    if [ ! -e "$BIN/$COMPONENT" ]; then
+        MISSING_TIDB_COMPONENTS="$MISSING_TIDB_COMPONENTS tidb-latest-linux-amd64/bin/$COMPONENT"
+    fi
+done
+
+if [ -n "$MISSING_TIDB_COMPONENTS" ]; then
+    echo "Downloading latest TiDB bundle..."
+    # TODO: the url is going to change from 'latest' to 'nightly' someday.
+    curl -L -f -o "$BIN/tidb.tar.gz" "https://download.pingcap.org/tidb-latest-linux-amd64.tar.gz"
+    tar -x -f "$BIN/tidb.tar.gz" -C "$BIN/" $MISSING_TIDB_COMPONENTS
+    rm "$BIN/tidb.tar.gz"
+    mv "$BIN"/tidb-latest-linux-amd64/bin/* "$BIN/"
+    rmdir "$BIN/tidb-latest-linux-amd64/bin"
+    rmdir "$BIN/tidb-latest-linux-amd64"
+fi
+
+if [ ! -e "$BIN/go-ycsb" ]; then
+    # TODO: replace this once there's a public downloadable release.
+    echo 'go-ycsb is missing. Please build manually following https://github.com/pingcap/go-ycsb#getting-started'
+    exit 1
+fi
+
+if [ ! -e "$BIN/minio" ]; then
+    echo "Downloading minio..."
+    curl -L -f -o "$BIN/minio" "https://dl.min.io/server/minio/release/linux-amd64/minio"
+    chmod a+x "$BIN/minio"
+fi
+
+echo "All binaries are now available."
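With cluster management moved into tests/_utils/run_services, tests/run.sh below also gains suite filtering: the loop glob `tests/${TEST_NAME-*}/run.sh` collapses to a single directory when TEST_NAME is set, so one suite can be run alone:

```sh
# run only the br_s3 suite (binaries in bin/ are still required)
TEST_NAME=br_s3 tests/run.sh
```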
diff --git a/tests/run.sh b/tests/run.sh
index 3cedc7093..5b1111afd 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Copyright 2019 PingCAP, Inc.
 #
@@ -14,83 +14,11 @@
 # limitations under the License.
 
 set -eu
+cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source $cur/_utils/run_services
 
-TEST_DIR=/tmp/backup_restore_test
-
-PD_ADDR="127.0.0.1:2379"
-TIDB_IP="127.0.0.1"
-TIDB_PORT="4000"
-TIDB_ADDR="127.0.0.1:4000"
-TIDB_STATUS_ADDR="127.0.0.1:10080"
-# actaul tikv_addr are TIKV_ADDR${i}
-TIKV_ADDR="127.0.0.1:2016"
-TIKV_COUNT=4
-
-stop_services() {
-    killall -9 tikv-server || true
-    killall -9 pd-server || true
-    killall -9 tidb-server || true
-
-    find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true
-}
-
-start_services() {
-    stop_services
-
-    mkdir -p "$TEST_DIR"
-    rm -f "$TEST_DIR"/*.log
-
-    echo "Starting PD..."
-    bin/pd-server \
-        --client-urls "http://$PD_ADDR" \
-        --log-file "$TEST_DIR/pd.log" \
-        --data-dir "$TEST_DIR/pd" &
-    # wait until PD is online...
-    while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do
-        sleep 1
-    done
-
-    echo "Starting TiKV..."
-    for i in $(seq $TIKV_COUNT); do
-        bin/tikv-server \
-            --pd "$PD_ADDR" \
-            -A "$TIKV_ADDR$i" \
-            --log-file "$TEST_DIR/tikv${i}.log" \
-            -C "tests/config/tikv.toml" \
-            -s "$TEST_DIR/tikv${i}" &
-    done
-    sleep 1
-
-    echo "Starting TiDB..."
-    bin/tidb-server \
-        -P 4000 \
-        --status 10080 \
-        --store tikv \
-        --path "$PD_ADDR" \
-        --config "tests/config/tidb.toml" \
-        --log-file "$TEST_DIR/tidb.log" &
-
-    echo "Verifying TiDB is started..."
-    i=0
-    while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do
-        i=$((i+1))
-        if [ "$i" -gt 10 ]; then
-            echo 'Failed to start TiDB'
-            exit 1
-        fi
-        sleep 3
-    done
-
-    i=0
-    while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do
-        i=$((i+1))
-        if [ "$i" -gt 10 ]; then
-            echo 'Failed to bootstrap cluster'
-            exit 1
-        fi
-        sleep 3
-    done
-}
+mkdir -p "$TEST_DIR"
+rm -f "$TEST_DIR"/*.log
 
 trap stop_services EXIT
 start_services
@@ -100,7 +28,7 @@ if [ "${1-}" = '--debug' ]; then
     read line
 fi
 
-for script in tests/*/run.sh; do
+for script in tests/${TEST_NAME-*}/run.sh; do
     echo "*===== Running test $script... =====*"
     TEST_DIR="$TEST_DIR" \
     PD_ADDR="$PD_ADDR" \
@@ -111,5 +39,6 @@ for script in tests/*/run.sh; do
     TIKV_ADDR="$TIKV_ADDR" \
     PATH="tests/_utils:bin:$PATH" \
    TEST_NAME="$(basename "$(dirname "$script")")" \
-    sh "$script"
+    BR_LOG_TO_TERM=1 \
+    bash "$script"
 done
diff --git a/tools.json b/tools.json
index e3dd19414..2b41d4fce 100644
--- a/tools.json
+++ b/tools.json
@@ -18,7 +18,7 @@
     },
     {
       "Repository": "github.com/golangci/golangci-lint/cmd/golangci-lint",
-      "Commit": "901cf25e20f86b7e9dc6f73eaba5afbd0cbdc257"
+      "Commit": "b9eef79121fff235d0d794c176ffa2b3d9bd422f"
     }
   ],
   "RetoolVersion": "1.3.7"