Skip to content

Commit

Permalink
statistics: check Killed in the GenJSONTableFromStats (#47778)
Browse files Browse the repository at this point in the history
close #47779
  • Loading branch information
hawkingrei authored Oct 20, 2023
1 parent 0847179 commit 9b8890d
Show file tree
Hide file tree
Showing 3 changed files with 38 additions and 8 deletions.
23 changes: 19 additions & 4 deletions pkg/statistics/handle/storage/json.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"bytes"
"encoding/json"
"io"
"sync/atomic"
"time"

"github.com/klauspost/compress/gzip"
Expand All @@ -31,6 +32,7 @@ import (
"github.com/pingcap/tidb/pkg/types"
compressutil "github.com/pingcap/tidb/pkg/util/compress"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/memory"
"go.uber.org/zap"
)

Expand Down Expand Up @@ -88,7 +90,10 @@ func dumpJSONCol(hist *statistics.Histogram, cmsketch *statistics.CMSketch, topn
}

// GenJSONTableFromStats generates a JSONTable from the given tableInfo and stats
func GenJSONTableFromStats(dbName string, tableInfo *model.TableInfo, tbl *statistics.Table) (*util.JSONTable, error) {
func GenJSONTableFromStats(sctx sessionctx.Context, dbName string, tableInfo *model.TableInfo, tbl *statistics.Table) (*util.JSONTable, error) {
tracker := memory.NewTracker(memory.LabelForAnalyzeMemory, -1)
tracker.AttachTo(sctx.GetSessionVars().MemTracker)
defer tracker.Detach()
jsonTbl := &util.JSONTable{
DatabaseName: dbName,
TableName: tableInfo.Name.L,
Expand All @@ -104,11 +109,21 @@ func GenJSONTableFromStats(dbName string, tableInfo *model.TableInfo, tbl *stati
if err != nil {
return nil, errors.Trace(err)
}
jsonTbl.Columns[col.Info.Name.L] = dumpJSONCol(hist, col.CMSketch, col.TopN, col.FMSketch, &col.StatsVer)
proto := dumpJSONCol(hist, col.CMSketch, col.TopN, col.FMSketch, &col.StatsVer)
tracker.Consume(proto.TotalMemoryUsage())
if atomic.LoadUint32(&sctx.GetSessionVars().Killed) == 1 {
return nil, errors.Trace(statistics.ErrQueryInterrupted)
}
jsonTbl.Columns[col.Info.Name.L] = proto
col.FMSketch.DestroyAndPutToPool()
}

for _, idx := range tbl.Indices {
jsonTbl.Indices[idx.Info.Name.L] = dumpJSONCol(&idx.Histogram, idx.CMSketch, idx.TopN, nil, &idx.StatsVer)
proto := dumpJSONCol(&idx.Histogram, idx.CMSketch, idx.TopN, nil, &idx.StatsVer)
tracker.Consume(proto.TotalMemoryUsage())
if atomic.LoadUint32(&sctx.GetSessionVars().Killed) == 1 {
return nil, errors.Trace(statistics.ErrQueryInterrupted)
}
jsonTbl.Indices[idx.Info.Name.L] = proto
}
jsonTbl.ExtStats = dumpJSONExtendedStats(tbl.ExtendedStats)
return jsonTbl, nil
Expand Down
9 changes: 5 additions & 4 deletions pkg/statistics/handle/storage/stats_read_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -478,17 +478,18 @@ func (s *statsReadWriter) TableStatsToJSON(dbName string, tableInfo *model.Table
if err != nil || tbl == nil {
return nil, err
}
var jsonTbl *util.JSONTable
err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
tbl.Version, tbl.ModifyCount, tbl.RealtimeCount, err = StatsMetaByTableIDFromStorage(sctx, physicalID, snapshot)
if err != nil {
return err
}
jsonTbl, err = GenJSONTableFromStats(sctx, dbName, tableInfo, tbl)
return err
})
if err != nil {
return nil, err
}
jsonTbl, err := GenJSONTableFromStats(dbName, tableInfo, tbl)
if err != nil {
return nil, err
}
return jsonTbl, nil
}

Expand Down
14 changes: 14 additions & 0 deletions pkg/statistics/handle/util/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -280,3 +280,17 @@ type JSONColumn struct {
LastUpdateVersion uint64 `json:"last_update_version"`
Correlation float64 `json:"correlation"`
}

// TotalMemoryUsage returns the combined in-memory footprint of the
// column's histogram, CM sketch, and FM sketch. Components that are
// nil contribute nothing to the total.
func (col *JSONColumn) TotalMemoryUsage() (size int64) {
	// Each component is nil-checked explicitly; wrapping them in an
	// interface slice would make typed nils appear non-nil.
	if h := col.Histogram; h != nil {
		size += int64(h.Size())
	}
	if cms := col.CMSketch; cms != nil {
		size += int64(cms.Size())
	}
	if fms := col.FMSketch; fms != nil {
		size += int64(fms.Size())
	}
	return size
}

0 comments on commit 9b8890d

Please sign in to comment.