ddl: fix the placement behavior when drop/truncate partitions (#44129) #44150

Closed
69 changes: 69 additions & 0 deletions ddl/partition.go
@@ -283,6 +283,25 @@ func alterTablePartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, addingDe
return bundles, nil
}

// When dropping/truncating a partition, we should still keep the dropped partition's placement settings to avoid unnecessary region schedules.
// When a partition is not configured with a placement policy directly, its rules live in the table's placement group, which will be deleted after
// the partition is truncated/dropped. So it is necessary to create a standalone placement group, keyed by the partition id, for the dropped partition.
func droppedPartitionBundles(t *meta.Meta, tblInfo *model.TableInfo, dropPartitions []model.PartitionDefinition) ([]*placement.Bundle, error) {
partitions := make([]model.PartitionDefinition, 0, len(dropPartitions))
for _, def := range dropPartitions {
def = def.Clone()
if def.PlacementPolicyRef == nil {
def.PlacementPolicyRef = tblInfo.PlacementPolicyRef
}

if def.PlacementPolicyRef != nil {
partitions = append(partitions, def)
}
}

return placement.NewPartitionListBundles(t, partitions)
}
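
The fallback rule above is the core of the fix: a dropped partition that has no placement policy of its own inherits the table-level policy, and only partitions that end up with some policy get a standalone placement group. A minimal, self-contained sketch of that rule, using simplified stand-in types (policyRef, partitionDef, and keepDroppedPlacements are illustrative names, not TiDB's real types):

package main

import "fmt"

// Hypothetical, simplified stand-ins for model.PartitionDefinition and the
// placement policy reference used in the diff above.
type policyRef struct{ Name string }

type partitionDef struct {
	ID        int64
	Name      string
	PolicyRef *policyRef
}

// keepDroppedPlacements mirrors the idea behind droppedPartitionBundles:
// a dropped partition without its own policy falls back to the table-level
// policy, and only partitions that end up with some policy still need
// a standalone placement group.
func keepDroppedPlacements(tablePolicy *policyRef, dropped []partitionDef) []partitionDef {
	kept := make([]partitionDef, 0, len(dropped))
	for _, def := range dropped {
		if def.PolicyRef == nil {
			def.PolicyRef = tablePolicy // inherit the table's policy
		}
		if def.PolicyRef != nil {
			kept = append(kept, def) // this partition still needs placement rules
		}
	}
	return kept
}

func main() {
	table := &policyRef{Name: "primary_east"}
	dropped := []partitionDef{
		{ID: 101, Name: "p0"},                                        // inherits the table policy
		{ID: 102, Name: "p1", PolicyRef: &policyRef{Name: "p_west"}}, // keeps its own policy
	}
	for _, def := range keepDroppedPlacements(table, dropped) {
		fmt.Printf("partition %s (id %d) keeps policy %s\n", def.Name, def.ID, def.PolicyRef.Name)
	}
}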

// updatePartitionInfo merges `addingDefinitions` into `Definitions` in the tableInfo.
func updatePartitionInfo(tblInfo *model.TableInfo) {
parInfo := &model.PartitionInfo{}
@@ -1820,6 +1839,32 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (
return ver, err
}

var bundles []*placement.Bundle
// create placement groups for each dropped partition to keep the data's placement before GC
// These placement groups will be deleted after GC
bundles, err = droppedPartitionBundles(t, tblInfo, tblInfo.Partition.DroppingDefinitions)
if err != nil {
job.State = model.JobStateCancelled
return ver, err
}

var tableBundle *placement.Bundle
// Recompute the table bundle to remove the dropped partitions' rules from its group
tableBundle, err = placement.NewTableBundle(t, tblInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}

if tableBundle != nil {
bundles = append(bundles, tableBundle)
}

if err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles); err != nil {
job.State = model.JobStateCancelled
return ver, err
}

job.SchemaState = model.StateDeleteOnly
ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != job.SchemaState)
case model.StateDeleteOnly:
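
Note that in the drop path above, the standalone groups for the dropped partitions and the recomputed table group (which no longer carries those partitions' rules) are sent to PD in a single PutRuleBundlesWithDefaultRetry call, so the data never loses its placement before GC. A minimal sketch of that batching, with simplified stand-in types (bundle and assembleDropBundles are illustrative, not the real placement.Bundle API):

package main

import "fmt"

// bundle is a simplified stand-in for a placement rule group keyed by an ID
// (the real groups are keyed by table or partition ID).
type bundle struct {
	ID    string
	Rules []string
}

// assembleDropBundles mirrors the batching used in onDropTablePartition:
// standalone groups for every dropped partition plus the recomputed table
// group are pushed to PD together in one call.
func assembleDropBundles(droppedGroups []*bundle, tableGroup *bundle) []*bundle {
	bundles := make([]*bundle, 0, len(droppedGroups)+1)
	bundles = append(bundles, droppedGroups...)
	if tableGroup != nil {
		bundles = append(bundles, tableGroup)
	}
	return bundles
}

func main() {
	dropped := []*bundle{{ID: "partition-101", Rules: []string{"leader: east"}}}
	table := &bundle{ID: "table-100", Rules: []string{"leader: east"}} // recomputed without partition 101
	for _, b := range assembleDropBundles(dropped, table) {
		fmt.Printf("put rule group %s -> %v\n", b.ID, b.Rules)
	}
}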
@@ -1915,11 +1960,13 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e
return ver, errors.Trace(dbterror.ErrPartitionMgmtOnNonpartitioned)
}

oldPartitions := make([]model.PartitionDefinition, 0, len(oldIDs))
newPartitions := make([]model.PartitionDefinition, 0, len(oldIDs))
for _, oldID := range oldIDs {
for i := 0; i < len(pi.Definitions); i++ {
def := &pi.Definitions[i]
if def.ID == oldID {
oldPartitions = append(oldPartitions, def.Clone())
pid, err1 := t.GenGlobalID()
if err1 != nil {
return ver, errors.Trace(err1)
@@ -1967,6 +2014,28 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e
return ver, errors.Trace(err)
}

tableBundle, err := placement.NewTableBundle(t, tblInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}

if tableBundle != nil {
bundles = append(bundles, tableBundle)
}

// create placement groups for each dropped partition to keep the data's placement before GC
// These placement groups will be deleted after GC
keepDroppedBundles, err := droppedPartitionBundles(t, tblInfo, oldPartitions)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
bundles = append(bundles, keepDroppedBundles...)

err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), bundles)
if err != nil {
job.State = model.JobStateCancelled
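
For the truncate path, the important detail is that each definition is cloned into oldPartitions before its ID is swapped for a freshly generated one, so the keep-alive placement groups are built against the old partition IDs whose data still exists until GC, while the new, empty partitions are covered by the recomputed table bundle. A minimal sketch of that snapshot-then-swap step, with simplified stand-in types (partitionDef and truncatePartitions are illustrative, not TiDB's real types):

package main

import "fmt"

// partitionDef is a simplified stand-in for model.PartitionDefinition.
type partitionDef struct {
	ID   int64
	Name string
}

// truncatePartitions snapshots each truncated definition with its old ID
// before replacing the ID in place, mirroring how onTruncateTablePartition
// collects oldPartitions for droppedPartitionBundles.
func truncatePartitions(defs []partitionDef, oldIDs []int64, nextID func() int64) (oldParts []partitionDef) {
	oldParts = make([]partitionDef, 0, len(oldIDs))
	for _, oldID := range oldIDs {
		for i := range defs {
			if defs[i].ID == oldID {
				oldParts = append(oldParts, defs[i]) // snapshot keeps the old ID
				defs[i].ID = nextID()                // the partition itself gets a new, empty ID
			}
		}
	}
	return oldParts
}

func main() {
	next := int64(200)
	gen := func() int64 { next++; return next }
	defs := []partitionDef{{ID: 101, Name: "p0"}, {ID: 102, Name: "p1"}}
	old := truncatePartitions(defs, []int64{101}, gen)
	fmt.Println("keep placement for:", old, "new definitions:", defs)
}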