[20/19] perf stat: Factor out evsel__count_has_error()
Commit Message
It's possible to have 0 enabled/running time for some per-task or per-cgroup
events since they are not scheduled on any CPU. Treating the whole event as
failed would not work in this case. On second thought, the existing check is
only relevant when a CPU-level aggregation mode (like per-socket, per-core,
...) is used.
To make it clearer, factor out the condition check into the new
evsel__count_has_error() function and add some comments.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/util/stat.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
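As an aside (not part of the patch), here is a minimal stand-alone C sketch of the
decision the new helper encodes, using simplified stand-in types instead of the real
evsel/perf_counts_values structs; the names below (struct sample, count_has_error)
are purely illustrative:

/* count_has_error_sketch.c - illustration only; types are simplified stand-ins */
#include <stdbool.h>
#include <stdio.h>

enum aggr_mode { AGGR_GLOBAL, AGGR_SOCKET, AGGR_CORE };	/* subset of perf's modes */

struct sample {
	bool failed;			/* stands in for evsel->err || counts->scaled == -1 */
	enum aggr_mode mode;		/* stands in for config->aggr_mode */
	unsigned long long ena;		/* count->ena: time the event was enabled */
	unsigned long long run;		/* count->run: time the event was running */
};

/* same decision order as evsel__count_has_error() in the diff below */
static bool count_has_error(const struct sample *s)
{
	if (s->failed)				/* the event already failed */
		return true;
	if (s->mode == AGGR_GLOBAL)		/* per-task/per-cgroup: 0 time is fine */
		return false;
	return s->ena == 0 || s->run == 0;	/* CPU aggregation: must have run */
}

int main(void)
{
	struct sample idle_task      = { false, AGGR_GLOBAL,   0,   0 };
	struct sample core_no_time   = { false, AGGR_CORE,     0,   0 };
	struct sample already_failed = { true,  AGGR_CORE,   100, 100 };

	printf("idle task:      %d\n", count_has_error(&idle_task));      /* 0: not an error */
	printf("core, no time:  %d\n", count_has_error(&core_no_time));   /* 1: error */
	printf("already failed: %d\n", count_has_error(&already_failed)); /* 1: error */
	return 0;
}

The first case is the per-task/per-cgroup situation from the commit message: zero
enabled/running time alone is not treated as an error under AGGR_GLOBAL.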
Comments
On Fri, Oct 14, 2022 at 11:16:55AM -0700, Namhyung Kim wrote:
> It's possible to have 0 enabled/running time for some per-task or per-cgroup
> events since they are not scheduled on any CPU. Treating the whole event as
> failed would not work in this case. On second thought, the existing check is
> only relevant when a CPU-level aggregation mode (like per-socket, per-core,
> ...) is used.
>
> To make it clearer, factor out the condition check into the new
> evsel__count_has_error() function and add some comments.
So I should just add this one to the 19-patch series I already applied
locally, ok.
- Arnaldo
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
> tools/perf/util/stat.c | 25 +++++++++++++++++++++----
> 1 file changed, 21 insertions(+), 4 deletions(-)
>
> diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
> index 6ab9c58beca7..9dfa8cac6bc4 100644
> --- a/tools/perf/util/stat.c
> +++ b/tools/perf/util/stat.c
> @@ -396,6 +396,25 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
>  	return ret;
>  }
>  
> +static bool evsel__count_has_error(struct evsel *evsel,
> +				   struct perf_counts_values *count,
> +				   struct perf_stat_config *config)
> +{
> +	/* the evsel was failed already */
> +	if (evsel->err || evsel->counts->scaled == -1)
> +		return true;
> +
> +	/* this is meaningful for CPU aggregation modes only */
> +	if (config->aggr_mode == AGGR_GLOBAL)
> +		return false;
> +
> +	/* it's considered ok when it actually ran */
> +	if (count->ena != 0 && count->run != 0)
> +		return false;
> +
> +	return true;
> +}
> +
>  static int
>  process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
> 			       int cpu_map_idx, int thread,
> @@ -450,11 +469,9 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
>
>  	/*
>  	 * When any result is bad, make them all to give consistent output
> -	 * in interval mode. But per-task counters can have 0 enabled time
> -	 * when some tasks are idle.
> +	 * in interval mode.
>  	 */
> -	if (((count->ena == 0 || count->run == 0) && cpu.cpu != -1) ||
> -	    evsel->counts->scaled == -1) {
> +	if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
>  		ps_aggr->counts.val = 0;
>  		ps_aggr->counts.ena = 0;
>  		ps_aggr->counts.run = 0;
> --
> 2.38.0.413.g74048e4d9e-goog