From e3b03b6c1a4f3b4564be08809f58584592621a0a Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 5 May 2016 16:04:03 -0700
Subject: perf stat: Avoid fractional digits for integer scales

When the scaling factor is a full integer don't display fractional
digits. This avoids unnecessary .00 output for topdown metrics with
scale factors.

v2: Remove redundant check.

Signed-off-by: Andi Kleen
Acked-by: Jiri Olsa
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1462489447-31832-7-git-send-email-andi@firstfloor.org
[ Rename 'round' to 'stat_round' as 'round' is defined in math.h,
  included by this patch, and this breaks the build on ubuntu 12.04 ]
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/builtin-stat.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

(limited to 'tools')

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5645a8361de6..16a923c1633b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,6 +66,7 @@
 #include
 #include
 #include
+#include <math.h>
 
 #define DEFAULT_SEPARATOR	" "
 #define CNTR_NOT_SUPPORTED	""
@@ -986,12 +987,12 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
 	const char *fmt;
 
 	if (csv_output) {
-		fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
+		fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
 	} else {
 		if (big_num)
-			fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+			fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
 		else
-			fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+			fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
 	}
 
 	aggr_printout(evsel, id, nr);
@@ -1995,7 +1996,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
 				    union perf_event *event,
 				    struct perf_session *session)
 {
-	struct stat_round_event *round = &event->stat_round;
+	struct stat_round_event *stat_round = &event->stat_round;
 	struct perf_evsel *counter;
 	struct timespec tsh, *ts = NULL;
 	const char **argv = session->header.env.cmdline_argv;
@@ -2004,12 +2005,12 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
 	evlist__for_each(evsel_list, counter)
 		perf_stat_process_counter(&stat_config, counter);
 
-	if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
-		update_stats(&walltime_nsecs_stats, round->time);
+	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
+		update_stats(&walltime_nsecs_stats, stat_round->time);
 
-	if (stat_config.interval && round->time) {
-		tsh.tv_sec = round->time / NSECS_PER_SEC;
-		tsh.tv_nsec = round->time % NSECS_PER_SEC;
+	if (stat_config.interval && stat_round->time) {
+		tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
+		tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
 		ts = &tsh;
 	}
 
--
cgit v1.2.3-58-ga151
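Editor's note: the format-selection idea above can be shown with a minimal
standalone sketch (this is not the perf code itself; the sample values are
invented, and the program needs -lm for floor()):

  #include <math.h>
  #include <stdio.h>

  static void print_scaled(double avg, double sc)
  {
  	/* floor(sc) == sc iff the scale factor is a whole number,
  	 * so ".00" is only printed for genuinely fractional scales. */
  	const char *fmt = floor(sc) != sc ? "%18.2f%s\n" : "%18.0f%s\n";

  	printf(fmt, avg * sc, " unit");
  }

  int main(void)
  {
  	print_scaled(1234.0, 1.0);	/* integer scale    -> no fractional digits */
  	print_scaled(1234.0, 0.25);	/* fractional scale -> two decimals         */
  	return 0;
  }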
From 6ae98ba611ed1c11ddc5645475bc03b46a3c04e7 Mon Sep 17 00:00:00 2001
From: He Kuang
Date: Thu, 12 May 2016 08:43:11 +0000
Subject: perf symbols: Store vdso buildid unconditionally

When unwinding callchains on a different machine, vdso info should be
available so the unwind process won't be interrupted if address falls
into vdso region. But in most cases, the addresses of sample events are
not in vdso range, the buildid of a zero hit vdso won't be stored into
perf.data.

This patch stores vdso buildid regardless of whether the vdso is hit or
not.

Signed-off-by: He Kuang
Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Andi Kleen
Cc: David Ahern
Cc: Ekaterina Tumanova
Cc: Jiri Olsa
Cc: Josh Poimboeuf
Cc: Kan Liang
Cc: Masami Hiramatsu
Cc: Namhyung Kim
Cc: Pekka Enberg
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Sukadev Bhattiprolu
Cc: Wang Nan
Link: http://lkml.kernel.org/r/1463042596-61703-3-git-send-email-hekuang@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/build-id.c | 2 +-
 tools/perf/util/dso.c      | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'tools')

diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff425e1232c..67e5966503b2 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
 		size_t name_len;
 		bool in_kernel = false;
 
-		if (!pos->hit)
+		if (!pos->hit && !dso__is_vdso(pos))
 			continue;
 
 		if (dso__is_vdso(pos)) {
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 3357479082ca..75b75615e2f8 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,6 +7,7 @@
 #include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
+#include "vdso.h"
 
 char dso__symtab_origin(const struct dso *dso)
 {
@@ -1169,7 +1170,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
 	struct dso *pos;
 
 	list_for_each_entry(pos, head, node) {
-		if (with_hits && !pos->hit)
+		if (with_hits && !pos->hit && !dso__is_vdso(pos))
 			continue;
 		if (pos->has_build_id) {
 			have_build_id = true;
--
cgit v1.2.3-58-ga151
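Editor's note: both hunks above apply the same rule to the two build-id
writers; as a compressed restatement (a sketch with a hypothetical helper
name, not perf's actual code):

  /* A DSO's build-id is written out when it was hit by samples, or
   * unconditionally when it is the vdso. */
  static bool want_build_id(bool with_hits, bool hit, bool is_vdso)
  {
  	return !with_hits || hit || is_vdso;
  }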
From b0404be8d6186f9f3c23e2b5ff247e667be90652 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Fri, 13 May 2016 15:01:01 +0900
Subject: perf stat: Fix indentation of stalled backend cycle

The commit 140aeadc1fb5 ("perf stat: Abstract stat metrics printing")
changed how shadow metrics are printed, but it missed to update the
width of the stalled backend cycles event to 7.2% like others.

This resulted in misaligned output like below:

  Performance counter stats for 'pwd':

          0.638313      task-clock (msec)         #    0.567 CPUs utilized
                 0      context-switches          #    0.000 K/sec
                 0      cpu-migrations            #    0.000 K/sec
                54      page-faults               #    0.085 M/sec
           885,600      cycles                    #    1.387 GHz
           558,438      stalled-cycles-frontend   #   63.06% frontend cycles idle
           431,355      stalled-cycles-backend    #  48.71% backend cycles idle
           674,956      instructions              #    0.76  insn per cycle
                                                  #    0.83  stalled cycles per insn
           130,380      branches                  #  204.257 M/sec
                        branch-misses

       0.001125426 seconds time elapsed

Signed-off-by: Namhyung Kim
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Peter Zijlstra
Fixes: 140aeadc1fb5 ("perf stat: Abstract stat metrics printing")
Link: http://lkml.kernel.org/r/1463119263-5569-1-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/stat-shadow.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index fdb71961143e..61200fcac5ef 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -188,7 +188,7 @@ static void print_stalled_cycles_backend(int cpu,
 
 	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
-	out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
 }
 
 static void print_branch_misses(int cpu,
--
cgit v1.2.3-58-ga151
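Editor's note: the effect of the one-character width change can be reproduced
with a few printf() calls (a standalone illustration using the percentages
from the output above):

  #include <stdio.h>

  int main(void)
  {
  	/* other metrics are printed with a 7.2 field width */
  	printf("#  %7.2f%% frontend cycles idle\n", 63.06);
  	/* old "%6.2f%%": one column narrower, so the value drifts left */
  	printf("#  %6.2f%% backend cycles idle (misaligned)\n", 48.71);
  	/* new "%7.2f%%": lines up with the other percentages */
  	printf("#  %7.2f%% backend cycles idle (aligned)\n", 48.71);
  	return 0;
  }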
From daf4f4786e8af371048e72cb37ac05190e89198a Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Fri, 13 May 2016 15:01:02 +0900
Subject: perf stat: Update runtime using cpu-clock event

Currently only the task-clock event updates the runtime_nsec so it
cannot show the metric when using cpu-clock events.  However cpu clock
works basically same as task-clock, so no need to not update the
runtime IMHO.

Before:

  # perf stat -a -e cpu-clock,context-switches,page-faults,cycles sleep 0.1

   Performance counter stats for 'system wide':

         1217.759506      cpu-clock (msec)
                   93      context-switches
                   61      page-faults
           18,958,022      cycles

          0.101393794 seconds time elapsed

After:

   Performance counter stats for 'system wide':

         1220.471884      cpu-clock (msec)          #   12.013 CPUs utilized
                  118      context-switches          #    0.097 K/sec
                   59      page-faults               #    0.048 K/sec
           17,941,247      cycles                    #    0.015 GHz

          0.101594777 seconds time elapsed

Signed-off-by: Namhyung Kim
Tested-by: Arnaldo Carvalho de Melo
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1463119263-5569-2-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/util/stat-shadow.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'tools')

diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 61200fcac5ef..aa9efe08762b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 {
 	int ctx = evsel_context(counter);
 
-	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
 		update_stats(&runtime_nsecs_stats[cpu], count[0]);
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 			ratio = total / avg;
 
 		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
 		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
 			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
 				     avg / ratio);
--
cgit v1.2.3-58-ga151
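Editor's note: as a quick sanity check on the "After" output above, "CPUs
utilized" is simply the cpu-clock time divided by the elapsed wall time:
1220.471884 msec / 101.594777 msec ≈ 12.013, which is exactly the value now
printed next to cpu-clock.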
From a1f3d56761df31f0ffeb215b974e26d5613e92a4 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Fri, 13 May 2016 15:01:03 +0900
Subject: perf stat: Use cpu-clock event for cpu targets

Currently 'perf stat' always counts task-clock event by default.  But
it's somewhat confusing for system-wide targets (especially with 'sleep
N' as the 'sleep' task just sleeps and doesn't use cputime).  Changing
to cpu-clock event instead for that case makes more sense IMHO.

Before:

  # perf stat -a sleep 0.1

   Performance counter stats for 'system wide':

        403.038603      task-clock (msec)         #    4.001 CPUs utilized
               150      context-switches          #    0.372 K/sec
                 7      cpu-migrations            #    0.017 K/sec
                71      page-faults               #    0.176 K/sec
        23,705,169      cycles                    #    0.059 GHz
        15,888,166      instructions              #    0.67  insn per cycle
         3,326,078      branches                  #    8.253 M/sec
            87,643      branch-misses             #    2.64% of all branches

       0.100737009 seconds time elapsed

  #

After:

  # perf stat -a sleep 0.1

   Performance counter stats for 'system wide':

        404.271182      cpu-clock (msec)          #    4.000 CPUs utilized
               143      context-switches          #    0.354 K/sec
                13      cpu-migrations            #    0.032 K/sec
                73      page-faults               #    0.181 K/sec
        22,119,220      cycles                    #    0.055 GHz
        13,622,065      instructions              #    0.62  insn per cycle
         2,918,769      branches                  #    7.220 M/sec
            85,033      branch-misses             #    2.91% of all branches

       0.101073089 seconds time elapsed

  #

Signed-off-by: Namhyung Kim
Tested-by: Arnaldo Carvalho de Melo
Cc: Andi Kleen
Cc: Jiri Olsa
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1463119263-5569-3-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/builtin-stat.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'tools')

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 16a923c1633b..efdd23221c16 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1905,6 +1905,9 @@ static int add_default_attributes(void)
 	}
 
 	if (!evsel_list->nr_entries) {
+		if (target__has_cpu(&target))
+			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
 		if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
 			return -1;
 		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
--
cgit v1.2.3-58-ga151
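Editor's note: the counted values in the two runs above differ only because
they are separate runs; the main user-visible change is the default event
name (cpu-clock instead of task-clock) for system-wide targets. Anyone who
prefers the old default can still request it explicitly, for example:

  # perf stat -a -e task-clock sleep 0.1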
From 0a77582f0407e7f9b5d775bebc31297a1b890be0 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Sun, 15 May 2016 12:19:40 +0900
Subject: perf symbols: Introduce DSO__NAME_KALLSYMS and DSO__NAME_KCORE

Instead of using a raw string, use DSO__NAME_KALLSYMS and
DSO__NAME_KCORE macros for kallsyms and kcore.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Brendan Gregg
Cc: Hemant Kumar
Cc: Namhyung Kim
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/20160515031935.4017.50971.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/builtin-buildid-cache.c |  8 ++++----
 tools/perf/util/annotate.c         |  2 +-
 tools/perf/util/machine.c          |  2 +-
 tools/perf/util/symbol.c           | 10 +++++-----
 tools/perf/util/symbol.h           |  3 +++
 5 files changed, 14 insertions(+), 11 deletions(-)

(limited to 'tools')

diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 632efc6b79a0..d75bded21fe0 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -119,8 +119,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
 	if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
 		return -1;
 
-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
-		  buildid_dir, sbuildid);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid);
 
 	if (!force &&
 	    !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -131,8 +131,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
 	if (build_id_cache__kcore_dir(dir, sizeof(dir)))
 		return -1;
 
-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
-		  buildid_dir, sbuildid, dir);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
 
 	if (mkdir_p(to_dir, 0755))
 		return -1;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 4db73d5a0dbc..b811924e5e1b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1122,7 +1122,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 	} else if (dso__is_kcore(dso)) {
 		goto fallback;
 	} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-		   strstr(command, "[kernel.kallsyms]") ||
+		   strstr(command, DSO__NAME_KALLSYMS) ||
 		   access(symfs_filename, R_OK)) {
 		free(filename);
 fallback:
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 639a2903065e..18dd96bdde05 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -709,7 +709,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
 	if (machine__is_host(machine)) {
 		vmlinux_name = symbol_conf.vmlinux_name;
 		if (!vmlinux_name)
-			vmlinux_name = "[kernel.kallsyms]";
+			vmlinux_name = DSO__NAME_KALLSYMS;
 
 		kernel = machine__findnew_kernel(machine, vmlinux_name,
 						 "[kernel]", DSO_TYPE_KERNEL);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7fb33304fb4e..2252b545ff43 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
-		  sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+		  DSO__NAME_KCORE, sbuild_id);
 
 	/* Use /proc/kallsyms if possible */
 	if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 		if (!find_matching_kcore(map, path, sizeof(path)))
 			return strdup(path);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
-		  buildid_dir, sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s",
+		  buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
 
 	if (access(path, F_OK)) {
 		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ do_kallsyms:
 
 	if (err > 0 && !dso__is_kcore(dso)) {
 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
-		dso__set_long_name(dso, "[kernel.kallsyms]", false);
+		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
 		map__fixup_start(map);
 		map__fixup_end(map);
 	}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 2b5e4ed76fcb..25f2fd672c2e 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
 #define DMGL_ANSI	(1 << 1)	/* Include const, volatile, etc */
 #endif
 
+#define DSO__NAME_KALLSYMS	"[kernel.kallsyms]"
+#define DSO__NAME_KCORE		"[kernel.kcore]"
+
 /** struct symbol - symtab entry
  *
  * @ignore - resolvable but tools ignore it (e.g. idle routines)
--
cgit v1.2.3-58-ga151

From a29d5c9b8167dbc21a7ca8c0302e3799f9063b4e Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Mon, 16 May 2016 21:16:54 -0300
Subject: perf tools: Separate accounting of contexts and real addresses in a stack trace

The perf_sample->ip_callchain->nr value includes all the entries in the
ip_callchain->ip[] array, real addresses and PERF_CONTEXT_{KERNEL,USER,etc},
while what the user expects is that what is in the kernel.perf_event_max_stack
sysctl or in the upcoming per event perf_event_attr.sample_max_stack knob be
honoured in terms of IP addresses in the stack trace.

So match the kernel support and validate chain->nr taking into account
both kernel.perf_event_max_stack and
kernel.perf_event_max_contexts_per_stack.

Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Alexei Starovoitov
Cc: Brendan Gregg
Cc: David Ahern
Cc: Frederic Weisbecker
Cc: He Kuang
Cc: Jiri Olsa
Cc: Masami Hiramatsu
Cc: Milian Wolff
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Cc: Wang Nan
Cc: Zefan Li
Link: http://lkml.kernel.org/n/tip-mgx0jpzfdq4uq4abfa40byu0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/perf.c         |  3 +++
 tools/perf/util/machine.c | 26 +++++++++++++++++---------
 tools/perf/util/util.c    |  3 ++-
 tools/perf/util/util.h    |  3 ++-
 4 files changed, 24 insertions(+), 11 deletions(-)

(limited to 'tools')

diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 797000842d40..15982cee5ef3 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -549,6 +549,9 @@ int main(int argc, const char **argv)
 	if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
 		sysctl_perf_event_max_stack = value;
 
+	if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+		sysctl_perf_event_max_contexts_per_stack = value;
+
 	cmd = extract_argv0_path(argv[0]);
 	if (!cmd)
 		cmd = "perf-help";
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 18dd96bdde05..7ba9fadb68af 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1811,9 +1811,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 {
 	struct branch_stack *branch = sample->branch_stack;
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = chain->nr;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
+	int i, j, err, nr_entries, nr_contexts;
 	int skip_idx = -1;
 	int first_call = 0;
 
@@ -1828,7 +1828,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	 * Based on DWARF debug information, some architectures skip
 	 * a callchain entry saved by the kernel.
 	 */
-	if (chain->nr < sysctl_perf_event_max_stack)
+	if (chain_nr < sysctl_perf_event_max_stack)
 		skip_idx = arch_skip_callchain_idx(thread, chain);
 
 	/*
@@ -1889,12 +1889,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	}
 
 check_calls:
-	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
-		pr_warning("corrupted callchain. skipping...\n");
-		return 0;
-	}
-
-	for (i = first_call; i < chain_nr; i++) {
+	for (i = first_call, nr_entries = 0, nr_contexts = 0;
+	     i < chain_nr && nr_entries < max_stack; i++) {
 		u64 ip;
 
 		if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1904,14 @@ check_calls:
 #endif
 		ip = chain->ips[j];
 
+		if (ip >= PERF_CONTEXT_MAX) {
+			if (++nr_contexts > sysctl_perf_event_max_contexts_per_stack)
+				goto out_corrupted_callchain;
+		} else {
+			if (++nr_entries > sysctl_perf_event_max_stack)
+				goto out_corrupted_callchain;
+		}
+
 		err = add_callchain_ip(thread, cursor, parent, root_al,
 				       &cpumode, ip);
 		if (err)
@@ -1915,6 +1919,10 @@ check_calls:
 	}
 
 	return 0;
+
+out_corrupted_callchain:
+	pr_warning("corrupted callchain. skipping...\n");
+	return 0;
 }
 
 static int unwind_entry(struct unwind_entry *entry, void *arg)
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eab077ad6ca9..23504ad5d6dd 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,7 +33,8 @@ struct callchain_param	callchain_param = {
 
 unsigned int page_size;
 int cacheline_size;
 
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
 bool test_attr__enabled;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7651633a8dc7..1e8c3167b9fb 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
 
 extern unsigned int page_size;
 extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
 struct parse_tag {
 	char tag;
--
cgit v1.2.3-58-ga151
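Editor's note: the validation rule this last patch adds to
thread__resolve_callchain_sample() can be illustrated on its own. Below is a
simplified, self-contained sketch that only classifies and counts entries;
PERF_CONTEXT_MAX is the perf_event ABI lower bound for the PERF_CONTEXT_*
marker values when compared as u64, and the limits here are plain integers
rather than the sysctl-backed globals perf itself uses.

  #include <stdbool.h>
  #include <stdint.h>

  #define PERF_CONTEXT_MAX ((uint64_t)-4095)

  static bool callchain_within_limits(const uint64_t *ips, int nr,
  				    int max_entries, int max_contexts)
  {
  	int nr_entries = 0, nr_contexts = 0;

  	for (int i = 0; i < nr; i++) {
  		if (ips[i] >= PERF_CONTEXT_MAX)
  			nr_contexts++;	/* PERF_CONTEXT_KERNEL, _USER, ... */
  		else
  			nr_entries++;	/* a real instruction pointer */

  		if (nr_contexts > max_contexts || nr_entries > max_entries)
  			return false;	/* treat as corrupted, as perf does */
  	}
  	return true;
  }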