perf stat: Split process_counters() to share it with process_stat_round_event()

It will do more processing with aggregation.  Let's split the function so that
it can be shared by process_stat_round_event() too.
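
As a condensed sketch (drawn from the diff below, with unchanged details of the
per-counter loop elided), the split ends up looking roughly like this:

	/* Only read the counters; report failure instead of returning silently. */
	static int read_counters(struct timespec *rs)
	{
		if (!stat_config.stop_read_counter) {
			if (read_bpf_map_counters() ||
			    read_affinity_counters(rs))
				return -1;
		}
		return 0;
	}

	/* The per-evsel processing that used to live in read_counters(). */
	static void process_counters(void)
	{
		struct evsel *counter;

		evlist__for_each_entry(evsel_list, counter)
			perf_stat_process_counter(&stat_config, counter);
	}

Callers then only process what was actually read:

	if (read_counters(&rs) == 0)
		process_counters();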

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221018020227.85905-15-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
namhyung authored and acmel committed Oct 27, 2022
1 parent 8f97963 commit 8962cbe
Showing 1 changed file with 13 additions and 9 deletions: tools/perf/builtin-stat.c
@@ -465,15 +465,19 @@ static int read_bpf_map_counters(void)
 	return 0;
 }
 
-static void read_counters(struct timespec *rs)
+static int read_counters(struct timespec *rs)
 {
-	struct evsel *counter;
-
 	if (!stat_config.stop_read_counter) {
 		if (read_bpf_map_counters() ||
 		    read_affinity_counters(rs))
-			return;
+			return -1;
 	}
+	return 0;
+}
+
+static void process_counters(void)
+{
+	struct evsel *counter;
 
 	evlist__for_each_entry(evsel_list, counter) {
 		if (counter->err)
@@ -494,7 +498,8 @@ static void process_interval(void)
 	perf_stat__reset_shadow_per_stat(&rt_stat);
 	evlist__reset_aggr_stats(evsel_list);
 
-	read_counters(&rs);
+	if (read_counters(&rs) == 0)
+		process_counters();
 
 	if (STAT_RECORD) {
 		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
@@ -980,7 +985,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	 * avoid arbitrary skew, we must read all counters before closing any
 	 * group leaders.
 	 */
-	read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
+	if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
+		process_counters();
 
 	/*
 	 * We need to keep evsel_list alive, because it's processed
@@ -2099,13 +2105,11 @@ static int process_stat_round_event(struct perf_session *session,
 				    union perf_event *event)
 {
 	struct perf_record_stat_round *stat_round = &event->stat_round;
-	struct evsel *counter;
 	struct timespec tsh, *ts = NULL;
 	const char **argv = session->header.env.cmdline_argv;
 	int argc = session->header.env.nr_cmdline;
 
-	evlist__for_each_entry(evsel_list, counter)
-		perf_stat_process_counter(&stat_config, counter);
+	process_counters();
 
 	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
 		update_stats(&walltime_nsecs_stats, stat_round->time);
