This is an automated cherry-pick of #44129
Signed-off-by: ti-chi-bot <ti-community-prow-bot@tidb.io>
lcwangchao authored and ti-chi-bot committed May 24, 2023
1 parent 71b1eca commit 223dd7a
Showing 3 changed files with 376 additions and 142 deletions.
69 changes: 69 additions & 0 deletions ddl/partition.go
@@ -283,6 +283,25 @@ func alterTablePartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, addingDe
return bundles, nil
}

// When dropping/truncating a partition, we should still keep the dropped partition's placement settings to avoid unnecessary region schedules.
// When a partition is not configured with a placement policy directly, its rules live in the table's placement group, which will be deleted after the
// partition is truncated/dropped. So it is necessary to create a standalone placement group keyed by the partition ID afterwards.
func droppedPartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, dropPartitions []model.PartitionDefinition) ([]*placement.Bundle, error) {
partitions := make([]model.PartitionDefinition, 0, len(dropPartitions))
for _, def := range dropPartitions {
def = def.Clone()
if def.PlacementPolicyRef == nil {
def.PlacementPolicyRef = tblInfo.PlacementPolicyRef
}

if def.PlacementPolicyRef != nil {
partitions = append(partitions, def)
}
}

return placement.NewPartitionListBundles(t, partitions)
}
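
To make the inherit-then-keep logic above concrete, here is a minimal, self-contained sketch of the same idea. All names in it (policyRef, partitionDef, keepBundle, bundlesForDropped) are simplified stand-ins invented for illustration; the real implementation works with model.PartitionDefinition and *placement.Bundle via placement.NewPartitionListBundles.

package main

import "fmt"

type policyRef struct{ Name string }

type partitionDef struct {
	ID     int64
	Name   string
	Policy *policyRef // nil means no partition-level placement policy
}

// keepBundle stands in for a standalone placement group keyed by a partition ID.
type keepBundle struct {
	GroupID string
	Policy  string
}

// bundlesForDropped mirrors the logic of droppedPartitionBundles: a partition
// without its own policy inherits the table-level one, and only partitions that
// end up with some policy get a standalone "keep" group.
func bundlesForDropped(tablePolicy *policyRef, dropped []partitionDef) []keepBundle {
	bundles := make([]keepBundle, 0, len(dropped))
	for _, def := range dropped {
		policy := def.Policy
		if policy == nil {
			policy = tablePolicy // inherit the table's placement policy
		}
		if policy == nil {
			continue // nothing to keep for this partition
		}
		bundles = append(bundles, keepBundle{
			GroupID: fmt.Sprintf("partition-%d", def.ID),
			Policy:  policy.Name,
		})
	}
	return bundles
}

func main() {
	tablePolicy := &policyRef{Name: "east_rule"}
	dropped := []partitionDef{
		{ID: 101, Name: "p0"},                                        // inherits the table policy
		{ID: 102, Name: "p1", Policy: &policyRef{Name: "west_rule"}}, // keeps its own policy
	}
	for _, b := range bundlesForDropped(tablePolicy, dropped) {
		fmt.Printf("%s -> %s\n", b.GroupID, b.Policy)
	}
}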

// updatePartitionInfo merges `addingDefinitions` into `Definitions` in the tableInfo.
func updatePartitionInfo(tblInfo *model.TableInfo) {
parInfo := &model.PartitionInfo{}
@@ -1820,6 +1839,32 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (
return ver, err
}

var bundles []*placement.Bundle
// create placement groups for each dropped partition to keep the data's placement before GC
// These placement groups will be deleted after GC
bundles, err = droppedPartitionBundles(t, tblInfo, tblInfo.Partition.DroppingDefinitions)
if err != nil {
job.State = model.JobStateCancelled
return ver, err
}

var tableBundle *placement.Bundle
// Recompute the table bundle to remove the dropped partitions' rules from its group
tableBundle, err = placement.NewTableBundle(t, tblInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}

if tableBundle != nil {
bundles = append(bundles, tableBundle)
}

if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil {
job.State = model.JobStateCancelled
return ver, err
}

job.SchemaState = model.StateDeleteOnly
ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != job.SchemaState)
case model.StateDeleteOnly:
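
The hunk above follows a simple pattern: build the per-partition keep groups, recompute the table bundle so it no longer carries rules for the dropped partitions, and push everything to PD in one call (infosync.PutRuleBundlesWithDefaultRetry). The sketch below only illustrates the combination step, with hypothetical types rather than the real placement API.

package main

import "fmt"

// bundle is a hypothetical stand-in for *placement.Bundle.
type bundle struct{ ID string }

// combineForDrop gathers the standalone keep groups for the dropped partitions
// together with the recomputed table bundle so they can be shipped in a single
// rule-bundle update.
func combineForDrop(keepDropped []bundle, tableBundle *bundle) []bundle {
	out := append([]bundle(nil), keepDropped...)
	if tableBundle != nil {
		// The recomputed table bundle no longer covers the dropped partitions,
		// so their placement now lives only in the keep groups.
		out = append(out, *tableBundle)
	}
	return out
}

func main() {
	keep := []bundle{{ID: "partition-101"}, {ID: "partition-102"}}
	table := &bundle{ID: "table-100"}
	fmt.Println(combineForDrop(keep, table)) // [{partition-101} {partition-102} {table-100}]
}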
@@ -1915,11 +1960,13 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e
return ver, errors.Trace(dbterror.ErrPartitionMgmtOnNonpartitioned)
}

oldPartitions := make([]model.PartitionDefinition, 0, len(oldIDs))
newPartitions := make([]model.PartitionDefinition, 0, len(oldIDs))
for _, oldID := range oldIDs {
for i := 0; i < len(pi.Definitions); i++ {
def := &pi.Definitions[i]
if def.ID == oldID {
oldPartitions = append(oldPartitions, def.Clone())
pid, err1 := t.GenGlobalID()
if err1 != nil {
return ver, errors.Trace(err1)
@@ -1967,6 +2014,28 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e
return ver, errors.Trace(err)
}

tableBundle, err := placement.NewTableBundle(t, tblInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}

if tableBundle != nil {
bundles = append(bundles, tableBundle)
}

// create placement groups for each dropped partition to keep the data's placement before GC
// These placement groups will be deleted after GC
keepDroppedBundles, err := droppedPartitionBundles(t, tblInfo, oldPartitions)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
bundles = append(bundles, keepDroppedBundles...)

err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles)
if err != nil {
job.State = model.JobStateCancelled
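
For truncate, the effect of this change can be pictured as a split: each truncated partition that carries placement settings keeps a standalone group under its old ID until GC, while the rebuilt table bundle covers only the replacement partition IDs. The toy model below (hypothetical names and IDs, placement policies omitted for brevity) shows only that split; the real code builds *placement.Bundle values as in the hunks above.

package main

import "fmt"

// ruleGroup is a toy stand-in for a PD placement group.
type ruleGroup struct {
	ID     string
	Covers []int64
}

// truncatePlacement models the end state after truncating partitions: the table
// group covers only the new partition IDs, and each old ID keeps its own group
// until GC removes the data and the group together.
func truncatePlacement(tableID int64, oldIDs, newIDs []int64) []ruleGroup {
	groups := make([]ruleGroup, 0, len(oldIDs)+1)
	groups = append(groups, ruleGroup{ID: fmt.Sprintf("table-%d", tableID), Covers: newIDs})
	for _, id := range oldIDs {
		groups = append(groups, ruleGroup{ID: fmt.Sprintf("partition-%d", id), Covers: []int64{id}})
	}
	return groups
}

func main() {
	for _, g := range truncatePlacement(100, []int64{101, 102}, []int64{201, 202}) {
		fmt.Printf("%s covers %v\n", g.ID, g.Covers)
	}
}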
