author		Ian Rogers <irogers@google.com>			2023-07-18 17:18:36 -0700
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2023-07-27 10:32:12 -0300
commit		b161f25fa30644598007d752a1b802cda0140788
tree		ef5693227c3de04399e8cf11fbede46ea81f695a /tools/perf
parent		e8d38345da249be17046b74d7bb64b77cdd07a08
perf parse-events: Only move force grouped evsels when sorting
Prior to this change, events without a group would be sorted as if they
all came from the location of the first event without a group. For
example, here instructions and cycles are without a group:
instructions,{imc_free_running/data_read/,imc_free_running/data_write/},cycles
parse-events would eventually create an evlist like:
instructions,cycles,{uncore_imc_free_running_0/data_read/,uncore_imc_free_running_1/data_read/,uncore_imc_free_running_0/data_write/,uncore_imc_free_running_1/data_write/}
This is done so that perf metric events, which must always be in a
group, are adjacent and can then be forced into a group.
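To make the old behaviour concrete, here is a minimal standalone stub
model (not the perf source; evsel_stub, old_sort_key() and the
hard-coded indices are hypothetical, for illustration only): every
ungrouped event compares with the index of the first ungrouped event,
so instructions and cycles get the same key and sort together, ahead of
the imc_free_running group that was parsed between them.

/* Hypothetical stub model of the OLD sort key; not perf code. */
#include <stdio.h>

struct evsel_stub {
	const char *name;
	int idx;        /* position in the parsed event list */
	int leader_idx; /* group leader's idx, or -1 if ungrouped */
};

/* Old behaviour: every ungrouped evsel keys on the first ungrouped idx. */
static int old_sort_key(const struct evsel_stub *e, int first_ungrouped_idx)
{
	return e->leader_idx == -1 ? first_ungrouped_idx : e->leader_idx;
}

int main(void)
{
	const struct evsel_stub evsels[] = {
		{ "instructions",                 0, -1 },
		{ "imc_free_running/data_read/",  1,  1 },
		{ "imc_free_running/data_write/", 2,  1 },
		{ "cycles",                       3, -1 },
	};
	const int first_ungrouped_idx = 0; /* idx of "instructions" */

	/* Prints key 0 for both instructions and cycles: they sort together. */
	for (size_t i = 0; i < sizeof(evsels) / sizeof(evsels[0]); i++)
		printf("%-30s key %d\n", evsels[i].name,
		       old_sort_key(&evsels[i], first_ungrouped_idx));
	return 0;
}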
This change modifies the sorting so that only force grouped events,
like perf metrics, are moved and all other events keep their position
with respect to groups in the evlist. The destination of the force
grouped events is the position of the first force grouped event.
For architectures without force grouped events, i.e. anything other
than Intel Icelake or newer, this should mean sorting and fixing don't
modify the event positions, except when the grouping has to be fixed
up per PMU, as with uncore events. A sketch of the new key follows.
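Under the same hypothetical stub model, the new key only redirects
evsels that must be force grouped (an int field stands in here for
arch_evsel__must_be_in_group()); with no force grouped events parsed,
force_grouped_idx stays -1 and every ungrouped event keeps its own
index, so the sort leaves positions untouched:

/* Hypothetical stub model of the NEW sort key; not perf code. */
#include <stdio.h>

struct evsel_stub {
	const char *name;
	int idx;              /* position in the parsed event list */
	int leader_idx;       /* group leader's idx, or -1 if ungrouped */
	int must_be_in_group; /* stand-in for arch_evsel__must_be_in_group() */
};

static int new_sort_key(const struct evsel_stub *e, int force_grouped_idx)
{
	if (e->leader_idx != -1)
		return e->leader_idx;      /* grouped: sort with the leader */
	if (force_grouped_idx != -1 && e->must_be_in_group)
		return force_grouped_idx;  /* force grouped: cluster together */
	return e->idx;                     /* plain ungrouped: stay in place */
}

int main(void)
{
	const struct evsel_stub evsels[] = {
		{ "instructions",                 0, -1, 0 },
		{ "imc_free_running/data_read/",  1,  1, 0 },
		{ "imc_free_running/data_write/", 2,  1, 0 },
		{ "cycles",                       3, -1, 0 },
	};

	/* Keys 0, 1, 1, 3: the original order is already sorted. */
	for (size_t i = 0; i < sizeof(evsels) / sizeof(evsels[0]); i++)
		printf("%-30s key %d\n", evsels[i].name,
		       new_sort_key(&evsels[i], /*force_grouped_idx=*/-1));
	return 0;
}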
Fixes: 347c2f0a0988c59c ("perf parse-events: Sort and group parsed events")
Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Andi Kleen <ak@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20230719001836.198363-4-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/util/parse-events.c	39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 62d5ff5d8dae..c9ec0cafb69d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2100,16 +2100,16 @@ __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
 	return lhs->core.idx - rhs->core.idx;
 }
 
-static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
+static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
 {
 	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
 	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
 	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
 	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
-	int *leader_idx = state;
-	int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
+	int *force_grouped_idx = _fg_idx;
+	int lhs_sort_idx, rhs_sort_idx, ret;
 	const char *lhs_pmu_name, *rhs_pmu_name;
-	bool lhs_has_group = false, rhs_has_group = false;
+	bool lhs_has_group, rhs_has_group;
 
 	/*
 	 * First sort by grouping/leader. Read the leader idx only if the evsel
@@ -2121,15 +2121,25 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
 	 */
 	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
 		lhs_has_group = true;
-		lhs_leader_idx = lhs_core->leader->idx;
+		lhs_sort_idx = lhs_core->leader->idx;
+	} else {
+		lhs_has_group = false;
+		lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
+			? *force_grouped_idx
+			: lhs_core->idx;
 	}
 	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
 		rhs_has_group = true;
-		rhs_leader_idx = rhs_core->leader->idx;
+		rhs_sort_idx = rhs_core->leader->idx;
+	} else {
+		rhs_has_group = false;
+		rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
+			? *force_grouped_idx
+			: rhs_core->idx;
 	}
 
-	if (lhs_leader_idx != rhs_leader_idx)
-		return lhs_leader_idx - rhs_leader_idx;
+	if (lhs_sort_idx != rhs_sort_idx)
+		return lhs_sort_idx - rhs_sort_idx;
 
 	/* Group by PMU if there is a group. Groups can't span PMUs. */
 	if (lhs_has_group && rhs_has_group) {
@@ -2146,7 +2156,7 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
 
 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 {
-	int idx = 0, unsorted_idx = -1;
+	int idx = 0, force_grouped_idx = -1;
 	struct evsel *pos, *cur_leader = NULL;
 	struct perf_evsel *cur_leaders_grp = NULL;
 	bool idx_changed = false, cur_leader_force_grouped = false;
@@ -2174,12 +2184,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 		 */
 		pos->core.idx = idx++;
 
-		if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2)
-			unsorted_idx = pos->core.idx;
+		/* Remember an index to sort all forced grouped events together to. */
+		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
+		    arch_evsel__must_be_in_group(pos))
+			force_grouped_idx = pos->core.idx;
 	}
 
 	/* Sort events. */
-	list_sort(&unsorted_idx, list, evlist__cmp);
+	list_sort(&force_grouped_idx, list, evlist__cmp);
 
 	/*
 	 * Recompute groups, splitting for PMUs and adding groups for events
@@ -2190,7 +2202,8 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 		const struct evsel *pos_leader = evsel__leader(pos);
 		const char *pos_pmu_name = pos->group_pmu_name;
 		const char *cur_leader_pmu_name;
-		bool pos_force_grouped = arch_evsel__must_be_in_group(pos);
+		bool pos_force_grouped = force_grouped_idx != -1 &&
+					 arch_evsel__must_be_in_group(pos);
 
 		/* Reset index and nr_members. */
 		if (pos->core.idx != idx)