syncer(dm): implement start-task --start-time #4485
Changes from 6 commits
@@ -227,6 +227,9 @@ type CheckPoint interface {
     // DeleteTablePoint deletes checkpoint for specified table in memory and storage
     DeleteTablePoint(tctx *tcontext.Context, table *filter.Table) error

+    // DeleteAllTablePoint deletes all table checkpoints in memory and storage
+    DeleteAllTablePoint(tctx *tcontext.Context) error
+
     // DeleteSchemaPoint deletes checkpoint for specified schema
     DeleteSchemaPoint(tctx *tcontext.Context, sourceSchema string) error

@@ -237,10 +240,13 @@ type CheckPoint interface {
     // corresponding to Meta.Save
     SaveGlobalPoint(point binlog.Location)

+    // ResetGlobalPoint saves the global binlog stream's checkpoint forcibly.
+    ResetGlobalPoint(location binlog.Location)
+
     // Snapshot makes a snapshot of the current checkpoint
     Snapshot(isSyncFlush bool) *SnapshotInfo

-    // FlushGlobalPointsExcept flushes the global checkpoint and tables'
+    // FlushPointsExcept flushes the global checkpoint and tables'
     // checkpoints except exceptTables; it also flushes SQLs with Args provided
     // by extraSQLs and extraArgs. Currently extraSQLs contain shard meta only.
     // @exceptTables: [[schema, table]... ]
@@ -551,6 +557,24 @@ func (cp *RemoteCheckPoint) DeleteTablePoint(tctx *tcontext.Context, table *filt
     return nil
 }

+// DeleteAllTablePoint implements CheckPoint.DeleteAllTablePoint.
+func (cp *RemoteCheckPoint) DeleteAllTablePoint(tctx *tcontext.Context) error {
+    cp.Lock()
+    defer cp.Unlock()
+
+    cp.logCtx.L().Info("delete all table checkpoint")
+    _, err := cp.dbConn.ExecuteSQL(
+        tctx,
+        []string{`DELETE FROM ` + cp.tableName + ` WHERE id = ? AND is_global = ?`},
+        []interface{}{cp.id, false},
+    )
+    if err != nil {
+        return err
+    }
+    cp.points = make(map[string]map[string]*binlogPoint)
+    return nil
+}
+
 // DeleteSchemaPoint implements CheckPoint.DeleteSchemaPoint.
 func (cp *RemoteCheckPoint) DeleteSchemaPoint(tctx *tcontext.Context, sourceSchema string) error {
     cp.Lock()

Review comment (on the ExecuteSQL call): Should we use maxDMLConnectionDuration or defaultDBTimeout?
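For context, here is a minimal standalone sketch of what this DELETE does: it removes every per-table checkpoint row for one source while leaving the single global row (is_global = true) in place. The table and column names follow the diff; everything else (the function, its signature, the surrounding schema) is an assumption for illustration, not DM's real API.

```go
package checkpointsketch

import (
	"context"
	"database/sql"
)

// deleteAllTablePoints removes the per-table checkpoint rows for one source,
// keeping the global checkpoint row (is_global = true) untouched.
func deleteAllTablePoints(ctx context.Context, db *sql.DB, tableName, sourceID string) (int64, error) {
	// tableName comes from trusted configuration, mirroring the string
	// concatenation in the diff; only values go through placeholders.
	res, err := db.ExecContext(ctx,
		`DELETE FROM `+tableName+` WHERE id = ? AND is_global = ?`,
		sourceID, false)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
```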
@@ -614,7 +638,16 @@ func (cp *RemoteCheckPoint) SaveGlobalPoint(location binlog.Location) {
     }
 }

-// FlushPointsExcept implements CheckPoint.FlushSnapshotPointsExcept.
+// ResetGlobalPoint implements CheckPoint.ResetGlobalPoint.
+func (cp *RemoteCheckPoint) ResetGlobalPoint(location binlog.Location) {
+    cp.Lock()
+    defer cp.Unlock()
+
+    cp.logCtx.L().Debug("reset global checkpoint", zap.Stringer("location", location))
+    cp.globalPoint = newBinlogPoint(location, binlog.NewLocation(cp.cfg.Flavor), nil, nil, cp.cfg.EnableGTID)
+}
+
+// FlushPointsExcept implements CheckPoint.FlushPointsExcept.
 func (cp *RemoteCheckPoint) FlushPointsExcept(
     tctx *tcontext.Context,
     snapshotID int,
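Note the second argument, binlog.NewLocation(cp.cfg.Flavor): the "already flushed" side of the checkpoint is reset to a zero location, so the next flush is forced to persist the new position. A standalone sketch of that current-vs-flushed pairing follows; the struct here is a guess at the role binlogPoint plays, not its actual definition.

```go
package main

import "fmt"

// Location stands in for binlog.Location.
type Location struct {
	Name string
	Pos  uint32
}

// point pairs the live checkpoint with the last value persisted to storage.
type point struct {
	current Location
	flushed Location
}

// reset forcibly overwrites the live location and clears the flushed one,
// so the checkpoint no longer claims the old position was persisted.
func (p *point) reset(loc Location) {
	p.current = loc
	p.flushed = Location{} // zero value, like binlog.NewLocation(flavor)
}

func main() {
	p := &point{
		current: Location{"mysql-bin.000009", 1024},
		flushed: Location{"mysql-bin.000009", 1024},
	}
	p.reset(Location{"mysql-bin.000003", 4})
	fmt.Printf("%+v\n", p) // flushed cleared; next flush must write the reset point
}
```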
@@ -127,6 +127,7 @@ type Syncer struct {
     cfg     *config.SubTaskConfig
+    syncCfg replication.BinlogSyncerConfig
     cliArgs *config.TaskCliArgs

     sgk       *ShardingGroupKeeper // keeper to keep all sharding (sub) group in this syncer
     pessimist *shardddl.Pessimist  // shard DDL pessimist
@@ -440,11 +441,13 @@ func (s *Syncer) Init(ctx context.Context) (err error) {
     }

     // when Init syncer, set active relay log info
-    err = s.setInitActiveRelayLog(ctx)
-    if err != nil {
-        return err
+    if s.cfg.Meta == nil || s.cfg.Meta.BinLogName != binlog.FakeBinlogName {
+        err = s.setInitActiveRelayLog(ctx)
+        if err != nil {
+            return err
+        }
+        rollbackHolder.Add(fr.FuncRollback{Name: "remove-active-realylog", Fn: s.removeActiveRelayLog})
     }
-    rollbackHolder.Add(fr.FuncRollback{Name: "remove-active-realylog", Fn: s.removeActiveRelayLog})

     s.reset()
     return nil

Review comment: we risk the relay log being purged between Init and Run. I'd prefer we not change the old logic unless we have thought it through.
Reply: but if we go into this branch, how about making …
Reply: Init should not contain long-running work by definition; it would time out the whole pipeline.
@@ -1236,6 +1239,17 @@ func (s *Syncer) afterFlushCheckpoint(task *checkpointFlushTask) error {
     s.lastCheckpointFlushedTime = now

     s.logAndClearFilteredStatistics()
+
+    if s.cliArgs != nil && s.cliArgs.StartTime != "" {
+        clone := *s.cliArgs
+        clone.StartTime = ""
+        err2 := ha.PutTaskCliArgs(s.cli, s.cfg.Name, []string{s.cfg.SourceID}, clone)
+        if err2 != nil {
+            s.tctx.L().Error("failed to clean start-time in task cli args", zap.Error(err2))
+        } else {
+            s.cliArgs.StartTime = ""
+        }
+    }
     return nil
 }

Review comment: Why not use DelTaskCliBySource?
Reply: we might have more args in the near future, so for compatibility I only reset this one argument rather than deleting the whole record.
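The value copy is what makes this safe: clearing StartTime on a clone leaves the in-memory s.cliArgs intact until the write is accepted, so a failed write can be retried on the next flush. A standalone sketch of that clone-and-clear pattern, with TaskCliArgs and persist as stand-ins for DM's real types:

```go
package main

import "fmt"

type TaskCliArgs struct {
	StartTime string
	// future CLI arguments would live here, which is why the whole struct
	// is re-persisted rather than the record being deleted
}

// persist is a placeholder for ha.PutTaskCliArgs.
func persist(args TaskCliArgs) error {
	fmt.Printf("persisted: %+v\n", args)
	return nil
}

func clearStartTime(cur *TaskCliArgs) {
	clone := *cur        // value copy: mutating clone leaves cur intact
	clone.StartTime = "" // reset only the consumed field
	if err := persist(clone); err != nil {
		// keep the in-memory value so a later flush can retry
		return
	}
	cur.StartTime = ""
}

func main() {
	args := &TaskCliArgs{StartTime: "2022-01-01 00:00:00"}
	clearStartTime(args)
	fmt.Println(args.StartTime == "") // true
}
```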
@@ -1456,11 +1470,30 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
         }
     }()

-    // some initialization that can't be put in Syncer.Init
     fresh, err := s.IsFreshTask(runCtx)
     if err != nil {
         return err
-    } else if fresh {
+    }
+
+    // task command line arguments have the highest priority
+    // dm-syncer and other usages may not have an etcdCli, so we check it first
+    skipLoadMeta := false
+    if s.cli != nil {
+        s.cliArgs, err = ha.GetTaskCliArgs(s.cli, s.cfg.Name, s.cfg.SourceID)
+        if err != nil {
+            s.tctx.L().Error("failed to get task cli args", zap.Error(err))
+        }
+        if s.cliArgs != nil && s.cliArgs.StartTime != "" {
+            err = s.setGlobalPointByTime(tctx, s.cliArgs.StartTime)
+            if terror.ErrConfigStartTimeTooLate.Equal(err) {
+                return err
+            }
+            skipLoadMeta = err == nil
+        }
+    }
+
+    // some initialization that can't be put in Syncer.Init
+    if fresh && !skipLoadMeta {
         // for fresh task, we try to load checkpoints from meta (file or config item)
         err = s.checkpoint.LoadMeta()
         if err != nil {
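Putting the pieces together, meta is now loaded only when the task is fresh and --start-time was not successfully applied. A tiny sketch of that decision, with hypothetical names:

```go
package main

import "fmt"

// shouldLoadMeta mirrors the `fresh && !skipLoadMeta` condition above:
// a successfully resolved --start-time overrides the configured meta.
func shouldLoadMeta(fresh, startTimeApplied bool) bool {
	return fresh && !startTimeApplied
}

func main() {
	fmt.Println(shouldLoadMeta(true, false))  // fresh task, no --start-time: load meta
	fmt.Println(shouldLoadMeta(true, true))   // --start-time won: skip meta
	fmt.Println(shouldLoadMeta(false, false)) // resuming task: checkpoint already set
}
```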
@@ -3689,7 +3722,8 @@ func (s *Syncer) adjustGlobalPointGTID(tctx *tcontext.Context) (bool, error) {
     // 1. GTID is not enabled
     // 2. location already has GTID position
     // 3. location is totally new, has no position info
-    if !s.cfg.EnableGTID || location.GTIDSetStr() != "" || location.Position.Name == "" {
+    // 4. location is too early thus not a COMMIT location, which happens when it's reset by other logic
+    if !s.cfg.EnableGTID || location.GTIDSetStr() != "" || location.Position.Name == "" || location.Position.Pos == 4 {
         return false, nil
     }
     // set enableGTID to false for new streamerController
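The Pos == 4 test works because every MySQL binlog file opens with the 4-byte magic \xfe bin, so the first event starts at offset 4; a location pointing at the head of a file (as a reset produces) can therefore never be a COMMIT position. A small illustration:

```go
package main

import (
	"bytes"
	"fmt"
)

// binlogMagic is the 4-byte header at the start of every MySQL binlog file.
var binlogMagic = []byte{0xfe, 0x62, 0x69, 0x6e} // "\xfe bin"

// isFileStart reports whether pos is the pre-first-event offset,
// i.e. the position immediately after the magic bytes.
func isFileStart(pos uint32) bool {
	return pos == uint32(len(binlogMagic))
}

func main() {
	header := []byte{0xfe, 'b', 'i', 'n'}
	fmt.Println(bytes.Equal(header, binlogMagic)) // true
	fmt.Println(isFileStart(4))                   // true: reset locations point here
}
```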
@@ -3781,3 +3815,58 @@ func (s *Syncer) flushOptimisticTableInfos(tctx *tcontext.Context) {
         tctx.L().Error("failed to flush table points with table infos", log.ShortError(err))
     }
 }
+
+func (s *Syncer) setGlobalPointByTime(tctx *tcontext.Context, timeStr string) error {
+    // we support two layouts
+    t, err := time.ParseInLocation(config.StartTimeFormat, timeStr, s.timezone)
+    if err != nil {
+        t, err = time.ParseInLocation(config.StartTimeFormat2, timeStr, s.timezone)
+    }
+    if err != nil {
+        return err
+    }
+
+    var (
+        loc   *binlog.Location
+        posTp binlog.PosType
+    )
+
+    if s.relay != nil {
+        subDir := s.relay.Status(nil).(*pb.RelayStatus).RelaySubDir
+        relayDir := path.Join(s.cfg.RelayDir, subDir)
+        finder := binlog.NewLocalBinlogPosFinder(tctx, s.cfg.EnableGTID, s.cfg.Flavor, relayDir)
+        loc, posTp, err = finder.FindByTimestamp(t.Unix())
+    } else {
+        finder := binlog.NewRemoteBinlogPosFinder(tctx, s.fromDB.BaseDB.DB, s.syncCfg, s.cfg.EnableGTID)
+        loc, posTp, err = finder.FindByTimestamp(t.Unix())
+    }
+    if err != nil {
+        s.tctx.L().Error("fail to find binlog position by timestamp",
+            zap.Time("time", t),
+            zap.Error(err))
+        return err
+    }
+
+    switch posTp {
+    case binlog.InRangeBinlogPos:
+        s.tctx.L().Info("find binlog position by timestamp",
+            zap.String("time", timeStr),
+            zap.Stringer("pos", loc))
+    case binlog.BelowLowerBoundBinlogPos:
+        s.tctx.L().Warn("fail to find binlog location by timestamp because the timestamp is too early, will use the earliest binlog location",
+            zap.String("time", timeStr),
+            zap.Any("location", loc))
+    case binlog.AboveUpperBoundBinlogPos:
+        return terror.ErrConfigStartTimeTooLate.Generate(timeStr)
+    }
+
+    err = s.checkpoint.DeleteAllTablePoint(tctx)
+    if err != nil {
+        return err
+    }
+    s.checkpoint.ResetGlobalPoint(*loc)
+    s.tctx.L().Info("will replicate from the specified time; the locations recorded in the checkpoint and the config file will be ignored",
+        zap.String("time", timeStr),
+        zap.Any("locationOfTheTime", loc))
+    return nil
+}

Review comment (on the BelowLowerBoundBinlogPos case): Would returning an error be better here?
Reply: cc @sunzhaoyang
Review comment: Can we set inst.Meta after cfg.Decode? Then we would not need RawDecode and Adjust, and could depend on cfg.Decode alone.
Reply: cfg.Decode calls adjust internally, so it would report an error about inst.Meta before we get a chance to set it.
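For readers outside this thread, a standalone sketch of the ordering problem being discussed: if decoding validates internally, patching inst.Meta afterwards is too late, hence decode-without-validation plus an explicit adjust. All type and function names here are stand-ins, not DM's real config API.

```go
package main

import (
	"errors"
	"fmt"
)

type Meta struct{ BinLogName string }

type TaskConfig struct{ Meta *Meta }

// adjust is a stand-in validator, resembling what cfg.Decode runs internally.
func (c *TaskConfig) adjust() error {
	if c.Meta == nil {
		return errors.New("meta must be set for incremental mode")
	}
	return nil
}

// rawDecode fills the struct without validating (like cfg.RawDecode).
func rawDecode(c *TaskConfig) { /* parse the file, no validation */ }

func main() {
	cfg := &TaskConfig{}
	rawDecode(cfg)
	// patch a placeholder meta before validation so adjust passes;
	// the value of binlog.FakeBinlogName is assumed here
	cfg.Meta = &Meta{BinLogName: "fake-binlog.000001"}
	fmt.Println(cfg.adjust() == nil) // true
}
```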