Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r-- | tools/perf/builtin-record.c | 54
1 file changed, 32 insertions, 22 deletions
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8374117e66f6..efa03e4ac2c9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -52,6 +52,7 @@
 #include "util/pmu-hybrid.h"
 #include "util/evlist-hybrid.h"
 #include "util/off_cpu.h"
+#include "util/bpf-filter.h"
 #include "asm/bug.h"
 #include "perf.h"
 #include "cputopo.h"
@@ -1292,7 +1293,7 @@ static int record__open(struct record *rec)
	 * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
	 * of waiting or event synthesis.
	 */
-	if (opts->initial_delay || target__has_cpu(&opts->target) ||
+	if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
	    perf_pmu__has_hybrid()) {
		pos = evlist__get_tracking_event(evlist);
		if (!evsel__is_dummy_event(pos)) {
@@ -1307,7 +1308,7 @@ static int record__open(struct record *rec)
		 * Enable the dummy event when the process is forked for
		 * initial_delay, immediately for system wide.
		 */
-		if (opts->initial_delay && !pos->immediate &&
+		if (opts->target.initial_delay && !pos->immediate &&
		    !target__has_cpu(&opts->target))
			pos->core.attr.enable_on_exec = 1;
		else
@@ -1352,7 +1353,7 @@ try_again:

	if (evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
-			pos->filter, evsel__name(pos), errno,
+			pos->filter ?: "BPF", evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
@@ -1856,24 +1857,16 @@ record__switch_output(struct record *rec, bool at_exit)
	return fd;
}

-static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
+static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
					struct perf_record_lost_samples *lost,
-					int cpu_idx, int thread_idx)
+					int cpu_idx, int thread_idx, u64 lost_count,
+					u16 misc_flag)
{
-	struct perf_counts_values count;
	struct perf_sample_id *sid;
	struct perf_sample sample = {};
	int id_hdr_size;

-	if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) {
-		pr_err("read LOST count failed\n");
-		return;
-	}
-
-	if (count.lost == 0)
-		return;
-
-	lost->lost = count.lost;
+	lost->lost = lost_count;
	if (evsel->core.ids) {
		sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
		sample.id = sid->id;
@@ -1882,6 +1875,7 @@ static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
	id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
						       evsel->core.attr.sample_type, &sample);
	lost->header.size = sizeof(*lost) + id_hdr_size;
+	lost->header.misc = misc_flag;
	record__write(rec, NULL, lost, lost->header.size);
}

@@ -1905,6 +1899,7 @@ static void record__read_lost_samples(struct record *rec)

	evlist__for_each_entry(session->evlist, evsel) {
		struct xyarray *xy = evsel->core.sample_id;
+		u64 lost_count;

		if (xy == NULL || evsel->core.fd == NULL)
			continue;
@@ -1916,12 +1911,27 @@ static void record__read_lost_samples(struct record *rec)

		for (int x = 0; x < xyarray__max_x(xy); x++) {
			for (int y = 0; y < xyarray__max_y(xy); y++) {
-				__record__read_lost_samples(rec, evsel, lost, x, y);
+				struct perf_counts_values count;
+
+				if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
+					pr_debug("read LOST count failed\n");
+					goto out;
+				}
+
+				if (count.lost) {
+					__record__save_lost_samples(rec, evsel, lost,
+								    x, y, count.lost, 0);
+				}
			}
		}
+
+		lost_count = perf_bpf_filter__lost_count(evsel);
+		if (lost_count)
+			__record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
+						    PERF_RECORD_MISC_LOST_SAMPLES_BPF);
	}
+out:
	free(lost);
-
}

static volatile sig_atomic_t workload_exec_errno;
@@ -2474,7 +2484,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
		rec->tool.ordered_events = false;
	}

-	if (!rec->evlist->core.nr_groups)
+	if (evlist__nr_groups(rec->evlist) == 0)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
@@ -2522,7 +2532,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
-	if (!target__none(&opts->target) && !opts->initial_delay)
+	if (!target__none(&opts->target) && !opts->target.initial_delay)
		evlist__enable(rec->evlist);

	/*
@@ -2574,10 +2584,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
		evlist__start_workload(rec->evlist);
	}

-	if (opts->initial_delay) {
+	if (opts->target.initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
-		if (opts->initial_delay > 0) {
-			usleep(opts->initial_delay * USEC_PER_MSEC);
+		if (opts->target.initial_delay > 0) {
+			usleep(opts->target.initial_delay * USEC_PER_MSEC);
			evlist__enable(rec->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}
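
Note on the record__open() hunks: with an initial delay and no CPU target, the tracking
dummy event is created disabled with enable_on_exec=1, so the kernel itself arms the
event when the forked workload calls execve(). The following is a minimal standalone
sketch of that perf_event_open() pattern, not perf's own code; the pipe handshake and
the task-clock counter are illustrative assumptions.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count = 0;
	int go[2], fd;
	pid_t child;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;		/* created disabled... */
	attr.enable_on_exec = 1;	/* ...armed by the kernel at execve() */
	attr.exclude_kernel = 1;	/* works at default perf_event_paranoid */
	attr.exclude_hv = 1;

	if (pipe(go) < 0)
		return 1;

	child = fork();
	if (child == 0) {
		char c;

		close(go[1]);
		read(go[0], &c, 1);	/* wait until the counter is attached */
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	fd = perf_event_open(&attr, child, -1, -1, 0);
	close(go[0]);
	write(go[1], "x", 1);		/* let the child exec */
	waitpid(child, NULL, 0);

	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task-clock after exec: %llu ns\n", count);
	return 0;
}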
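
Note on the evlist__apply_filters() hunk: pos->filter is NULL when the failing filter
came from the new BPF filter support rather than a tracepoint filter string, so the
message now prints "BPF" instead of "(null)". It uses the GNU C "a ?: b" extension,
which yields a when a is non-null and b otherwise, evaluating a only once. A small
sketch of the idiom (gcc/clang):

#include <stdio.h>

int main(void)
{
	const char *filter = NULL;

	/* no tracepoint filter string set: report the failure as a BPF filter */
	printf("failed to set filter \"%s\"\n", filter ?: "BPF");

	filter = "common_pid != 0";
	/* with a filter string set, print it as before */
	printf("failed to set filter \"%s\"\n", filter ?: "BPF");
	return 0;
}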
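
Note on the lost-samples hunks: the rewrite separates reading the kernel's per-fd lost
count (perf_evsel__read()) from writing the PERF_RECORD_LOST_SAMPLES record, so the same
writer can also report samples dropped by a BPF filter, tagged through header.misc. The
record is variable-sized: a fixed header-plus-count followed by an optional id sample,
with header.size covering both. Below is a standalone sketch of that layout; the mirror
struct and the identifier-only id sample are assumptions made so it builds outside the
perf tree (the real code calls perf_event__synthesize_id_sample()).

#include <linux/perf_event.h>	/* struct perf_event_header, PERF_RECORD_LOST_SAMPLES */
#include <stdio.h>
#include <string.h>

/* local mirror of libperf's struct perf_record_lost_samples */
struct lost_samples_record {
	struct perf_event_header header;
	__u64 lost;
};

int main(void)
{
	__u64 buf[512] = { 0 };			/* 8-byte aligned scratch space */
	struct lost_samples_record *lost = (void *)buf;
	__u64 sample_id = 42;			/* stand-in for sid->id */
	int id_hdr_size;

	lost->header.type = PERF_RECORD_LOST_SAMPLES;
	lost->header.misc = 0;	/* the patch passes PERF_RECORD_MISC_LOST_SAMPLES_BPF
				 * here for counts read from the BPF filter */
	lost->lost = 100;			/* stand-in for count.lost */

	/* append the id sample after the fixed part; for an identifier-only
	 * sample_type this amounts to one trailing u64 */
	memcpy(lost + 1, &sample_id, sizeof(sample_id));
	id_hdr_size = sizeof(sample_id);

	/* header.size must cover the fixed part plus the id sample */
	lost->header.size = sizeof(*lost) + id_hdr_size;

	printf("synthesized %u-byte LOST_SAMPLES record, lost=%llu\n",
	       lost->header.size, (unsigned long long)lost->lost);
	return 0;
}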