Commit

*: add metadata lock when using the plan cache (pingcap#51897) (pingc…
ti-chi-bot authored Jun 12, 2024
1 parent 681b43f commit 2306250
Showing 19 changed files with 242 additions and 54 deletions.
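
In short, as the hunks below show: when a prepared statement is served from the plan cache, TiDB now also acquires a metadata lock (MDL) on every table the cached plan references, and compares each table's new per-table Revision counter with the value recorded when the plan was cached; any mismatch (or a change of the global schema version) discards the cached plan and triggers a replan. The following is a minimal, self-contained model of that decision, not the real code: TiDB's types and the actual tryLockMDLAndUpdateSchemaIfNecessary call are replaced by stand-ins.

package main

import "fmt"

// table is a stand-in for model.TableInfo: just an ID plus the Revision counter
// this commit adds (bumped by meta.UpdateTable on every rewrite of the table's metadata).
type table struct {
	id       int64
	revision uint64
}

// infoSchema is a stand-in for the current information schema, keyed by table ID.
type infoSchema map[int64]*table

// cachedStmt is a stand-in for PlanCacheStmt: per related table, the revision it had
// when the plan was built (stmt.RelateVersion in the diff below).
type cachedStmt struct {
	relateVersion map[int64]uint64
}

// preprocess models the new step 3 of planCachePreprocess: for every related table,
// lock its metadata (modelled here as a plain lookup) and decide whether the cached
// plan can be reused or must be rebuilt.
func preprocess(stmt *cachedStmt, is infoSchema) (reuse bool) {
	for id, cachedRev := range stmt.relateVersion {
		cur, ok := is[id]
		if !ok {
			return false // table dropped or recreated: replan
		}
		// In TiDB this is where tryLockMDLAndUpdateSchemaIfNecessary takes the MDL.
		if cur.revision != cachedRev {
			return false // DDL ran on this table since the plan was cached: replan
		}
	}
	return true
}

func main() {
	is := infoSchema{101: {id: 101, revision: 4}}
	stmt := &cachedStmt{relateVersion: map[int64]uint64{101: 4}}

	fmt.Println(preprocess(stmt, is)) // true: the cached plan is still valid

	is[101].revision++ // a DDL such as ADD INDEX bumps the table's Revision
	fmt.Println(preprocess(stmt, is)) // false: the cached plan must be rebuilt
}
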
2 changes: 1 addition & 1 deletion ddl/metadatalocktest/BUILD.bazel
@@ -7,7 +7,7 @@ go_test(
"mdl_test.go",
],
flaky = True,
shard_count = 34,
shard_count = 36,
deps = [
"//config",
"//ddl",
95 changes: 95 additions & 0 deletions ddl/metadatalocktest/mdl_test.go
@@ -872,6 +872,101 @@ func TestMDLPreparePlanCacheInvalid(t *testing.T) {
tk.MustQuery(`execute stmt_test_1 using @a;`).Check(testkit.Rows("1 <nil>", "2 <nil>", "3 <nil>", "4 <nil>"))
}

func TestMDLPreparePlanCacheExecute(t *testing.T) {
store, dom := testkit.CreateMockStoreAndDomain(t)

sv := server.CreateMockServer(t, store)

sv.SetDomain(dom)
dom.InfoSyncer().SetSessionManager(sv)
defer sv.Close()

conn1 := server.CreateMockConn(t, sv)
tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session)
conn2 := server.CreateMockConn(t, sv)
tkDDL := testkit.NewTestKitWithSession(t, store, conn2.Context().Session)
tk.MustExec("use test")
tk.MustExec("set global tidb_enable_metadata_lock=1")
tk.MustExec("create table t(a int);")
tk.MustExec("create table t2(a int);")
tk.MustExec("insert into t values(1), (2), (3), (4);")

tk.MustExec(`prepare stmt_test_1 from 'update t set a = ? where a = ?';`)
tk.MustExec(`set @a = 1, @b = 3;`)
tk.MustExec(`execute stmt_test_1 using @a, @b;`)

tk.MustExec("begin")

ch := make(chan struct{})

var wg sync.WaitGroup
wg.Add(1)
go func() {
<-ch
tkDDL.MustExec("alter table test.t add index idx(a);")
wg.Done()
}()

tk.MustQuery("select * from t2")
tk.MustExec(`set @a = 2, @b=4;`)
tk.MustExec(`execute stmt_test_1 using @a, @b;`)
tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
// The plan is from cache, the metadata lock should be added to block the DDL.
ch <- struct{}{}

time.Sleep(5 * time.Second)

tk.MustExec("commit")

wg.Wait()

tk.MustExec("admin check table t")
}

func TestMDLPreparePlanCacheExecute2(t *testing.T) {
store, dom := testkit.CreateMockStoreAndDomain(t)

sv := server.CreateMockServer(t, store)

sv.SetDomain(dom)
dom.InfoSyncer().SetSessionManager(sv)
defer sv.Close()

conn1 := server.CreateMockConn(t, sv)
tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session)
conn2 := server.CreateMockConn(t, sv)
tkDDL := testkit.NewTestKitWithSession(t, store, conn2.Context().Session)
tk.MustExec("use test")
tk.MustExec("set global tidb_enable_metadata_lock=1")
tk.MustExec("create table t(a int);")
tk.MustExec("create table t2(a int);")
tk.MustExec("insert into t values(1), (2), (3), (4);")

tk.MustExec(`prepare stmt_test_1 from 'select * from t where a = ?';`)
tk.MustExec(`set @a = 1;`)
tk.MustExec(`execute stmt_test_1 using @a;`)

tk.MustExec("begin")
tk.MustQuery("select * from t2")

var wg sync.WaitGroup
wg.Add(1)
go func() {
tkDDL.MustExec("alter table test.t add index idx(a);")
wg.Done()
}()

wg.Wait()

tk.MustExec(`set @a = 2;`)
tk.MustExec(`execute stmt_test_1 using @a;`)
// The plan should not be from cache because the schema has changed.
tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
tk.MustExec("commit")

tk.MustExec("admin check table t")
}

func TestMDLDisable2Enable(t *testing.T) {
store, dom := testkit.CreateMockStoreAndDomain(t)
sv := server.CreateMockServer(t, store)
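
The two tests added in this hunk exercise the behavior server-side. The same thing can be observed from an ordinary client: an open transaction that has executed a (possibly cached) plan on t holds a metadata lock on t, so a concurrent ALTER TABLE cannot finish until the transaction commits. The sketch below is an illustration, not part of the commit; it assumes a TiDB server on 127.0.0.1:4000 with tidb_enable_metadata_lock=1, a test database containing a table t(a int), and the github.com/go-sql-driver/mysql driver.

package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Warm the plan cache: prepare once and execute once outside the transaction.
	upd, err := db.Prepare("update t set a = ? where a = ?")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := upd.Exec(1, 3); err != nil {
		log.Fatal(err)
	}

	// Session 1: re-execute the prepared statement inside an open transaction.
	// Even if the plan comes from the cache, a metadata lock on t is now taken.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tx.Stmt(upd).Exec(2, 4); err != nil {
		log.Fatal(err)
	}

	// Session 2: the DDL has to wait for the metadata lock held by session 1.
	done := make(chan error, 1)
	go func() {
		_, err := db.Exec("alter table t add index idx(a)")
		done <- err
	}()

	select {
	case err := <-done:
		log.Fatalf("DDL finished while the transaction was still open: %v", err)
	case <-time.After(3 * time.Second):
		// Expected: the ALTER TABLE is still blocked.
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	if err := <-done; err != nil { // commit releases the MDL, the DDL finishes
		log.Fatal(err)
	}
	log.Println("DDL completed after commit, as expected")
}
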
1 change: 1 addition & 0 deletions domain/plan_replayer_dump.go
@@ -526,6 +526,7 @@ func dumpExplain(ctx sessionctx.Context, zw *zip.Writer, execStmts []ast.StmtNod
return nil
}

// extractTableNames extracts table names from the given stmts.
func extractTableNames(ctx context.Context, sctx sessionctx.Context,
ExecStmts []ast.StmtNode, curDB model.CIStr) (map[tableNamePair]struct{}, error) {
tableExtractor := &tableNameExtractor{
1 change: 1 addition & 0 deletions executor/executor.go
@@ -1985,6 +1985,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
vars.DiskTracker.ResetMaxConsumed()
vars.MemTracker.SessionID.Store(vars.ConnectionID)
vars.StmtCtx.TableStats = make(map[int64]interface{})
sc.MDLRelatedTableIDs = make(map[int64]int64)

isAnalyze := false
if execStmt, ok := s.(*ast.ExecuteStmt); ok {
5 changes: 3 additions & 2 deletions executor/prepared.go
@@ -26,6 +26,7 @@ import (
"github.com/pingcap/tidb/parser/mysql"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
@@ -117,7 +118,7 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error {
return err
}
}
stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.ctx, stmt0)
stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.ctx, stmt0, sessiontxn.GetTxnManager(e.ctx).GetTxnInfoSchema())
if err != nil {
return err
}
@@ -211,7 +212,7 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error {
if e.ctx.GetSessionVars().EnablePreparedPlanCache {
bindSQL, _ := plannercore.GetBindSQL4PlanCache(e.ctx, preparedObj)
cacheKey, err := plannercore.NewPlanCacheKey(vars, preparedObj.StmtText, preparedObj.StmtDB, prepared.SchemaVersion,
0, bindSQL)
0, bindSQL, preparedObj.RelateVersion)
if err != nil {
return err
}
3 changes: 1 addition & 2 deletions executor/seqtest/seq_executor_test.go
@@ -940,8 +940,7 @@ func TestBatchInsertDelete(t *testing.T) {
atomic.StoreUint64(&kv.TxnTotalSizeLimit, originLimit)
}()
// Set the limitation to a small value, make it easier to reach the limitation.
atomic.StoreUint64(&kv.TxnTotalSizeLimit, 5800)

atomic.StoreUint64(&kv.TxnTotalSizeLimit, 7000)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists batch_insert")
2 changes: 2 additions & 0 deletions meta/meta.go
@@ -828,6 +828,8 @@ func (m *Meta) UpdateTable(dbID int64, tableInfo *model.TableInfo) error {
return errors.Trace(err)
}

tableInfo.Revision++

data, err := json.Marshal(tableInfo)
if err != nil {
return errors.Trace(err)
3 changes: 3 additions & 0 deletions parser/model/model.go
@@ -567,6 +567,9 @@ type TableInfo struct {
ExchangePartitionInfo *ExchangePartitionInfo `json:"exchange_partition_info"`

TTLInfo *TTLInfo `json:"ttl_info"`

// Revision is per table schema's version, it will be increased when the schema changed.
Revision uint64 `json:"revision"`
}

// SepAutoInc decides whether _rowid and auto_increment id use separate allocator.
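
The two hunks above are the storage side of the scheme: TableInfo gains a Revision counter, and meta.UpdateTable bumps it every time the table's metadata is rewritten, so the counter is persisted and reloaded together with the rest of the table's JSON-encoded metadata. A trimmed-down sketch of that behavior using a stand-in struct, not the real model.TableInfo:

package main

import (
	"encoding/json"
	"fmt"
)

// tableInfo is a stand-in for model.TableInfo, keeping only the fields needed here.
// The json tag on Revision means the counter is stored alongside the rest of the
// table's metadata and survives restarts and schema reloads.
type tableInfo struct {
	ID       int64  `json:"id"`
	Name     string `json:"name"`
	Revision uint64 `json:"revision"`
}

// updateTable mimics what meta.(*Meta).UpdateTable now does: bump Revision first,
// then marshal the table info for storage.
func updateTable(t *tableInfo) ([]byte, error) {
	t.Revision++
	return json.Marshal(t)
}

func main() {
	t := &tableInfo{ID: 101, Name: "t", Revision: 4}
	data, err := updateTable(t) // runs as part of a DDL, e.g. ALTER TABLE t ADD INDEX
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"id":101,"name":"t","revision":5}
}
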
36 changes: 31 additions & 5 deletions planner/core/plan_cache.go
@@ -76,8 +76,33 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isGeneral
vars.PreparedParams = append(vars.PreparedParams, val)
}

// step 3: check schema version
if stmtAst.SchemaVersion != is.SchemaMetaVersion() {
// step 3: add metadata lock and check each table's schema version
schemaNotMatch := false
for i := 0; i < len(stmt.dbName); i++ {
_, ok := is.TableByID(stmt.tbls[i].Meta().ID)
if !ok {
tblByName, err := is.TableByName(stmt.dbName[i], stmt.tbls[i].Meta().Name)
if err != nil {
return ErrSchemaChanged.GenWithStack("Schema change caused error: %s", err.Error())
}
delete(stmt.RelateVersion, stmt.tbls[i].Meta().ID)
stmt.tbls[i] = tblByName
stmt.RelateVersion[tblByName.Meta().ID] = tblByName.Meta().Revision
}
newTbl, err := tryLockMDLAndUpdateSchemaIfNecessary(sctx, stmt.dbName[i], stmt.tbls[i], is)
if err != nil {
schemaNotMatch = true
continue
}
if stmt.tbls[i].Meta().Revision != newTbl.Meta().Revision {
schemaNotMatch = true
}
stmt.tbls[i] = newTbl
stmt.RelateVersion[newTbl.Meta().ID] = newTbl.Meta().Revision
}

// step 4: check schema version
if schemaNotMatch || stmt.PreparedAst.SchemaVersion != is.SchemaMetaVersion() {
// In order to avoid some correctness issues, we have to clear the
// cached plan once the schema version is changed.
// Cached plan in prepared struct does NOT have a "cache key" with
@@ -99,7 +124,7 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isGeneral
stmtAst.SchemaVersion = is.SchemaMetaVersion()
}

// step 4: handle expiration
// step 5: handle expiration
// If the lastUpdateTime less than expiredTimeStamp4PC,
// it means other sessions have executed 'admin flush instance plan_cache'.
// So we need to clear the current session's plan cache.
@@ -110,6 +135,7 @@ func planCachePreprocess(ctx context.Context, sctx sessionctx.Context, isGeneral
stmtAst.CachedPlan = nil
vars.LastUpdateTime4PC = expiredTimeStamp4PC
}

return nil
}

@@ -151,7 +177,7 @@ func GetPlanFromSessionPlanCache(ctx context.Context, sctx sessionctx.Context,
latestSchemaVersion = domain.GetDomain(sctx).InfoSchema().SchemaMetaVersion()
}
if cacheKey, err = NewPlanCacheKey(sctx.GetSessionVars(), stmt.StmtText,
stmt.StmtDB, stmtAst.SchemaVersion, latestSchemaVersion, bindSQL); err != nil {
stmt.StmtDB, stmtAst.SchemaVersion, latestSchemaVersion, bindSQL, stmt.RelateVersion); err != nil {
return nil, nil, err
}
}
@@ -293,7 +319,7 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isGeneralPlan
if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmtAst.Stmt, sessVars) {
delete(sessVars.IsolationReadEngines, kv.TiFlash)
if cacheKey, err = NewPlanCacheKey(sessVars, stmt.StmtText, stmt.StmtDB,
stmtAst.SchemaVersion, latestSchemaVersion, bindSQL); err != nil {
stmtAst.SchemaVersion, latestSchemaVersion, bindSQL, stmt.RelateVersion); err != nil {
return nil, nil, err
}
sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{}
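
Besides the preprocess check, the hunks above thread stmt.RelateVersion into NewPlanCacheKey, so the per-table revisions become part of the cache key itself. The exact key construction is not shown in this diff; the sketch below only illustrates the idea in a hypothetical form: hashing the (table ID, revision) pairs in a deterministic order means that a DDL on any related table yields a different key, so a stale entry can no longer be matched.

package main

import (
	"encoding/binary"
	"fmt"
	"hash"
	"hash/fnv"
	"sort"
)

// hashRelatedVersions folds (tableID, revision) pairs into a key hash in sorted
// order, so the result is deterministic and changes whenever any revision does.
// This is an illustration only, not the actual NewPlanCacheKey implementation.
func hashRelatedVersions(h hash.Hash64, relateVersion map[int64]uint64) {
	ids := make([]int64, 0, len(relateVersion))
	for id := range relateVersion {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })

	var buf [8]byte
	for _, id := range ids {
		binary.BigEndian.PutUint64(buf[:], uint64(id))
		h.Write(buf[:])
		binary.BigEndian.PutUint64(buf[:], relateVersion[id])
		h.Write(buf[:])
	}
}

func main() {
	before := map[int64]uint64{101: 3, 102: 7}
	after := map[int64]uint64{101: 3, 102: 8} // a DDL bumped table 102's revision

	h1, h2 := fnv.New64a(), fnv.New64a()
	hashRelatedVersions(h1, before)
	hashRelatedVersions(h2, after)
	fmt.Println(h1.Sum64() == h2.Sum64()) // false: the old cache entry cannot match anymore
}
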
(remaining 10 changed files not shown)
