-
Notifications
You must be signed in to change notification settings - Fork 5.8k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
*: record previous statement when commit is slow #11908
Changes from 2 commits
6d4a45b
96864a6
8e9e414
9ac681f
9c6ae30
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -177,6 +177,7 @@ func (a *recordSet) NewChunk() *chunk.Chunk { | |
func (a *recordSet) Close() error { | ||
err := a.executor.Close() | ||
a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil) | ||
a.stmt.Ctx.GetSessionVars().PrevStmt = a.stmt.OriginText() | ||
a.stmt.logAudit() | ||
return err | ||
} | ||
|
@@ -682,6 +683,16 @@ func (a *ExecStmt) logAudit() { | |
} | ||
} | ||
|
||
// FormatSQL is used to format the original SQL, e.g. truncating long SQL, appending prepared arguments. | ||
func FormatSQL(sql string, sessVars *variable.SessionVars) string { | ||
cfg := config.GetGlobalConfig() | ||
length := len(sql) | ||
if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(length) > maxQueryLen { | ||
sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, length) | ||
} | ||
return QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo() | ||
} | ||
|
||
// LogSlowQuery is used to print the slow query in the log files. | ||
func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) { | ||
sessVars := a.Ctx.GetSessionVars() | ||
|
@@ -695,11 +706,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) { | |
if costTime < threshold && level > zapcore.DebugLevel { | ||
return | ||
} | ||
sql := a.Text | ||
if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(len(sql)) > maxQueryLen { | ||
sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, len(a.Text)) | ||
} | ||
sql = QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo() | ||
sql := FormatSQL(a.Text, sessVars) | ||
|
||
var tableIDs, indexNames string | ||
if len(sessVars.StmtCtx.TableIDs) > 0 { | ||
|
@@ -712,38 +719,28 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) { | |
copTaskInfo := sessVars.StmtCtx.CopTasksDetails() | ||
statsInfos := plannercore.GetStatsInfo(a.Plan) | ||
memMax := sessVars.StmtCtx.MemTracker.MaxConsumed() | ||
_, digest := sessVars.StmtCtx.SQLDigest() | ||
slowItems := &variable.SlowQueryLogItems{ | ||
TxnTS: txnTS, | ||
SQL: sql, | ||
Digest: digest, | ||
TimeTotal: costTime, | ||
TimeParse: a.Ctx.GetSessionVars().DurationParse, | ||
TimeCompile: a.Ctx.GetSessionVars().DurationCompile, | ||
IndexNames: indexNames, | ||
StatsInfos: statsInfos, | ||
CopTasks: copTaskInfo, | ||
ExecDetail: execDetail, | ||
MemMax: memMax, | ||
Succ: succ, | ||
} | ||
if _, ok := a.StmtNode.(*ast.CommitStmt); ok { | ||
slowItems.PrevStmt = FormatSQL(sessVars.PrevStmt, sessVars) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It is all we could do :) |
||
} | ||
if costTime < threshold { | ||
_, digest := sessVars.StmtCtx.SQLDigest() | ||
logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(&variable.SlowQueryLogItems{ | ||
TxnTS: txnTS, | ||
SQL: sql, | ||
Digest: digest, | ||
TimeTotal: costTime, | ||
TimeParse: a.Ctx.GetSessionVars().DurationParse, | ||
TimeCompile: a.Ctx.GetSessionVars().DurationCompile, | ||
IndexNames: indexNames, | ||
StatsInfos: statsInfos, | ||
CopTasks: copTaskInfo, | ||
ExecDetail: execDetail, | ||
MemMax: memMax, | ||
Succ: succ, | ||
})) | ||
logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(slowItems)) | ||
} else { | ||
_, digest := sessVars.StmtCtx.SQLDigest() | ||
logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(&variable.SlowQueryLogItems{ | ||
TxnTS: txnTS, | ||
SQL: sql, | ||
Digest: digest, | ||
TimeTotal: costTime, | ||
TimeParse: a.Ctx.GetSessionVars().DurationParse, | ||
TimeCompile: a.Ctx.GetSessionVars().DurationCompile, | ||
IndexNames: indexNames, | ||
StatsInfos: statsInfos, | ||
CopTasks: copTaskInfo, | ||
ExecDetail: execDetail, | ||
MemMax: memMax, | ||
Succ: succ, | ||
})) | ||
logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(slowItems)) | ||
metrics.TotalQueryProcHistogram.Observe(costTime.Seconds()) | ||
metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds()) | ||
metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds()) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -60,6 +60,7 @@ var slowQueryCols = []columnInfo{ | |
{variable.SlowLogCopWaitAddr, mysql.TypeVarchar, 64, 0, nil, nil}, | ||
{variable.SlowLogMemMax, mysql.TypeLonglong, 20, 0, nil, nil}, | ||
{variable.SlowLogSucc, mysql.TypeTiny, 1, 0, nil, nil}, | ||
{variable.SlowLogPrevStmt, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, | ||
{variable.SlowLogQuerySQLStr, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, | ||
} | ||
|
||
|
@@ -87,6 +88,7 @@ func parseSlowLogFile(tz *time.Location, filePath string) ([][]types.Datum, erro | |
func ParseSlowLog(tz *time.Location, reader *bufio.Reader) ([][]types.Datum, error) { | ||
var rows [][]types.Datum | ||
startFlag := false | ||
prevStmtPrefix := variable.SlowLogPrevStmt + variable.SlowLogSpaceMarkStr | ||
crazycs520 marked this conversation as resolved.
Show resolved
Hide resolved
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Make this a global const. |
||
var st *slowQueryTuple | ||
for { | ||
lineByte, err := getOneLine(reader) | ||
|
@@ -112,15 +114,19 @@ func ParseSlowLog(tz *time.Location, reader *bufio.Reader) ([][]types.Datum, err | |
// Parse slow log field. | ||
if strings.HasPrefix(line, variable.SlowLogRowPrefixStr) { | ||
line = line[len(variable.SlowLogRowPrefixStr):] | ||
fieldValues := strings.Split(line, " ") | ||
for i := 0; i < len(fieldValues)-1; i += 2 { | ||
field := fieldValues[i] | ||
if strings.HasSuffix(field, ":") { | ||
field = field[:len(field)-1] | ||
} | ||
err = st.setFieldValue(tz, field, fieldValues[i+1]) | ||
if err != nil { | ||
return rows, err | ||
if strings.HasPrefix(line, prevStmtPrefix) { | ||
st.prevStmt = line[len(prevStmtPrefix):] | ||
} else { | ||
fieldValues := strings.Split(line, " ") | ||
for i := 0; i < len(fieldValues)-1; i += 2 { | ||
field := fieldValues[i] | ||
if strings.HasSuffix(field, ":") { | ||
field = field[:len(field)-1] | ||
} | ||
err = st.setFieldValue(tz, field, fieldValues[i+1]) | ||
if err != nil { | ||
return rows, err | ||
} | ||
} | ||
} | ||
} else if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) { | ||
|
@@ -195,6 +201,7 @@ type slowQueryTuple struct { | |
maxWaitTime float64 | ||
maxWaitAddress string | ||
memMax int64 | ||
prevStmt string | ||
sql string | ||
isInternal bool | ||
succ bool | ||
|
@@ -313,6 +320,7 @@ func (st *slowQueryTuple) convertToDatumRow() []types.Datum { | |
} else { | ||
record = append(record, types.NewIntDatum(0)) | ||
} | ||
record = append(record, types.NewStringDatum(st.prevStmt)) | ||
record = append(record, types.NewStringDatum(st.sql)) | ||
return record | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
Wouldn't it be better to put this inside the
if...else...
branch? If the log level is higher than debug,
we can save an object allocation.

There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
I think it will return at https://github.com/pingcap/tidb/pull/11908/files#diff-350127760839dbfd52d23927f7ff2d95R706 if the log level is higher than debug.