From 3912f2abc942a002ef611fc973add5e5eadb3432 Mon Sep 17 00:00:00 2001
From: Amerigo Wang
Date: Mon, 14 Dec 2009 03:23:56 -0500
Subject: perf: Use format string of printf to align strings

Instead of filling whitespaces to do alignment, use printf's format
string. This simplifies the code a bit.

Signed-off-by: WANG Cong
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
LKML-Reference: <20091214082700.4224.57640.sendpatchset@localhost.localdomain>
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-help.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 9f810b17c25c..e427d6965e0c 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -286,8 +286,7 @@ void list_common_cmds_help(void)
 
 	puts(" The most commonly used perf commands are:");
 	for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
-		printf(" %s ", common_cmds[i].name);
-		mput_char(' ', longest - strlen(common_cmds[i].name));
+		printf(" %-*s ", longest, common_cmds[i].name);
 		puts(common_cmds[i].help);
 	}
 }
-- cgit v1.2.3-58-ga151


From 06aae590033d1ae3c35b2920ef950cfc603e2a2d Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Sun, 27 Dec 2009 21:36:59 -0200
Subject: perf session: Move the event processing routines to session.c
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

No need for an extra "data_map" file since the routines there operate
mainly on a perf_session instance.

Signed-off-by: Arnaldo Carvalho de Melo
Cc: Frédéric Weisbecker
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Paul Mackerras
LKML-Reference: <1261957026-15580-3-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar
---
 tools/perf/Makefile        |   1 -
 tools/perf/util/data_map.c | 252 ---------------------------------------------
 tools/perf/util/session.c  | 245 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 245 insertions(+), 253 deletions(-)
 delete mode 100644 tools/perf/util/data_map.c

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 652a470b5f74..4172c3b0e4a7 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -423,7 +423,6 @@ LIB_OBJS += util/trace-event-perl.o
 LIB_OBJS += util/svghelper.o
 LIB_OBJS += util/sort.o
 LIB_OBJS += util/hist.o
-LIB_OBJS += util/data_map.o
 LIB_OBJS += util/probe-event.o
 
 BUILTIN_OBJS += builtin-annotate.o
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
deleted file mode 100644
index b557b836de3d..000000000000
--- a/tools/perf/util/data_map.c
+++ /dev/null
@@ -1,252 +0,0 @@
-#include "symbol.h"
-#include "util.h"
-#include "debug.h"
-#include "thread.h"
-#include "session.h"
-
-static int process_event_stub(event_t *event __used,
-			      struct perf_session *session __used)
-{
-	dump_printf(": unhandled!\n");
-	return 0;
-}
-
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
-{
-	if (!handler->process_sample_event)
-		handler->process_sample_event = process_event_stub;
-	if (!handler->process_mmap_event)
-		handler->process_mmap_event = process_event_stub;
-	if (!handler->process_comm_event)
-		handler->process_comm_event = process_event_stub;
-	if (!handler->process_fork_event)
-		handler->process_fork_event = process_event_stub;
-	if (!handler->process_exit_event)
-		handler->process_exit_event = process_event_stub;
-	if (!handler->process_lost_event)
-		handler->process_lost_event = process_event_stub;
-	if (!handler->process_read_event)
-		handler->process_read_event =
process_event_stub; - if (!handler->process_throttle_event) - handler->process_throttle_event = process_event_stub; - if (!handler->process_unthrottle_event) - handler->process_unthrottle_event = process_event_stub; -} - -static const char *event__name[] = { - [0] = "TOTAL", - [PERF_RECORD_MMAP] = "MMAP", - [PERF_RECORD_LOST] = "LOST", - [PERF_RECORD_COMM] = "COMM", - [PERF_RECORD_EXIT] = "EXIT", - [PERF_RECORD_THROTTLE] = "THROTTLE", - [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", - [PERF_RECORD_FORK] = "FORK", - [PERF_RECORD_READ] = "READ", - [PERF_RECORD_SAMPLE] = "SAMPLE", -}; - -unsigned long event__total[PERF_RECORD_MAX]; - -void event__print_totals(void) -{ - int i; - for (i = 0; i < PERF_RECORD_MAX; ++i) - pr_info("%10s events: %10ld\n", - event__name[i], event__total[i]); -} - -static int process_event(event_t *event, struct perf_session *session, - struct perf_event_ops *ops, - unsigned long offset, unsigned long head) -{ - trace_event(event); - - if (event->header.type < PERF_RECORD_MAX) { - dump_printf("%p [%p]: PERF_RECORD_%s", - (void *)(offset + head), - (void *)(long)(event->header.size), - event__name[event->header.type]); - ++event__total[0]; - ++event__total[event->header.type]; - } - - switch (event->header.type) { - case PERF_RECORD_SAMPLE: - return ops->process_sample_event(event, session); - case PERF_RECORD_MMAP: - return ops->process_mmap_event(event, session); - case PERF_RECORD_COMM: - return ops->process_comm_event(event, session); - case PERF_RECORD_FORK: - return ops->process_fork_event(event, session); - case PERF_RECORD_EXIT: - return ops->process_exit_event(event, session); - case PERF_RECORD_LOST: - return ops->process_lost_event(event, session); - case PERF_RECORD_READ: - return ops->process_read_event(event, session); - case PERF_RECORD_THROTTLE: - return ops->process_throttle_event(event, session); - case PERF_RECORD_UNTHROTTLE: - return ops->process_unthrottle_event(event, session); - default: - ops->total_unknown++; - return -1; - } -} - -int perf_header__read_build_ids(int input, u64 offset, u64 size) -{ - struct build_id_event bev; - char filename[PATH_MAX]; - u64 limit = offset + size; - int err = -1; - - while (offset < limit) { - struct dso *dso; - ssize_t len; - - if (read(input, &bev, sizeof(bev)) != sizeof(bev)) - goto out; - - len = bev.header.size - sizeof(bev); - if (read(input, filename, len) != len) - goto out; - - dso = dsos__findnew(filename); - if (dso != NULL) - dso__set_build_id(dso, &bev.build_id); - - offset += bev.header.size; - } - err = 0; -out: - return err; -} - -static struct thread *perf_session__register_idle_thread(struct perf_session *self) -{ - struct thread *thread = perf_session__findnew(self, 0); - - if (!thread || thread__set_comm(thread, "swapper")) { - pr_err("problem inserting idle task.\n"); - thread = NULL; - } - - return thread; -} - -int perf_session__process_events(struct perf_session *self, - struct perf_event_ops *ops) -{ - int err; - unsigned long head, shift; - unsigned long offset = 0; - size_t page_size; - event_t *event; - uint32_t size; - char *buf; - - if (perf_session__register_idle_thread(self) == NULL) - return -ENOMEM; - - perf_event_ops__fill_defaults(ops); - - page_size = getpagesize(); - - head = self->header.data_offset; - self->sample_type = perf_header__sample_type(&self->header); - - err = -EINVAL; - if (ops->sample_type_check && ops->sample_type_check(self) < 0) - goto out_err; - - if (!ops->full_paths) { - char bf[PATH_MAX]; - - if (getcwd(bf, sizeof(bf)) == NULL) { - err = -errno; 
-out_getcwd_err: - pr_err("failed to get the current directory\n"); - goto out_err; - } - self->cwd = strdup(bf); - if (self->cwd == NULL) { - err = -ENOMEM; - goto out_getcwd_err; - } - self->cwdlen = strlen(self->cwd); - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = mmap(NULL, page_size * self->mmap_window, PROT_READ, - MAP_SHARED, self->fd, offset); - if (buf == MAP_FAILED) { - pr_err("failed to mmap file\n"); - err = -errno; - goto out_err; - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * self->mmap_window) { - int munmap_ret; - - shift = page_size * (head / page_size); - - munmap_ret = munmap(buf, page_size * self->mmap_window); - assert(munmap_ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dump_printf("\n%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, self, ops, offset, head) < 0) { - - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head >= self->header.data_offset + self->header.data_size) - goto done; - - if (offset + head < self->size) - goto more; - -done: - err = 0; -out_err: - return err; -} diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index ce3a6c8abe76..736d4fda9272 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -148,3 +148,248 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self, return syms; } + +static int process_event_stub(event_t *event __used, + struct perf_session *session __used) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) +{ + if (handler->process_sample_event == NULL) + handler->process_sample_event = process_event_stub; + if (handler->process_mmap_event == NULL) + handler->process_mmap_event = process_event_stub; + if (handler->process_comm_event == NULL) + handler->process_comm_event = process_event_stub; + if (handler->process_fork_event == NULL) + handler->process_fork_event = process_event_stub; + if (handler->process_exit_event == NULL) + handler->process_exit_event = process_event_stub; + if (handler->process_lost_event == NULL) + handler->process_lost_event = process_event_stub; + if (handler->process_read_event == NULL) + handler->process_read_event = process_event_stub; + if (handler->process_throttle_event == NULL) + handler->process_throttle_event = process_event_stub; + if (handler->process_unthrottle_event == NULL) + handler->process_unthrottle_event = process_event_stub; +} + +static const char *event__name[] = { + [0] = "TOTAL", + [PERF_RECORD_MMAP] = "MMAP", + [PERF_RECORD_LOST] = "LOST", + [PERF_RECORD_COMM] = "COMM", + [PERF_RECORD_EXIT] = "EXIT", + [PERF_RECORD_THROTTLE] = "THROTTLE", + [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", + [PERF_RECORD_FORK] = "FORK", + [PERF_RECORD_READ] = "READ", + [PERF_RECORD_SAMPLE] = "SAMPLE", +}; + +unsigned long event__total[PERF_RECORD_MAX]; + +void event__print_totals(void) +{ + int i; + for (i = 0; i < PERF_RECORD_MAX; 
++i) + pr_info("%10s events: %10ld\n", + event__name[i], event__total[i]); +} + +static int perf_session__process_event(struct perf_session *self, + event_t *event, + struct perf_event_ops *ops, + unsigned long offset, unsigned long head) +{ + trace_event(event); + + if (event->header.type < PERF_RECORD_MAX) { + dump_printf("%p [%p]: PERF_RECORD_%s", + (void *)(offset + head), + (void *)(long)(event->header.size), + event__name[event->header.type]); + ++event__total[0]; + ++event__total[event->header.type]; + } + + switch (event->header.type) { + case PERF_RECORD_SAMPLE: + return ops->process_sample_event(event, self); + case PERF_RECORD_MMAP: + return ops->process_mmap_event(event, self); + case PERF_RECORD_COMM: + return ops->process_comm_event(event, self); + case PERF_RECORD_FORK: + return ops->process_fork_event(event, self); + case PERF_RECORD_EXIT: + return ops->process_exit_event(event, self); + case PERF_RECORD_LOST: + return ops->process_lost_event(event, self); + case PERF_RECORD_READ: + return ops->process_read_event(event, self); + case PERF_RECORD_THROTTLE: + return ops->process_throttle_event(event, self); + case PERF_RECORD_UNTHROTTLE: + return ops->process_unthrottle_event(event, self); + default: + ops->total_unknown++; + return -1; + } +} + +int perf_header__read_build_ids(int input, u64 offset, u64 size) +{ + struct build_id_event bev; + char filename[PATH_MAX]; + u64 limit = offset + size; + int err = -1; + + while (offset < limit) { + struct dso *dso; + ssize_t len; + + if (read(input, &bev, sizeof(bev)) != sizeof(bev)) + goto out; + + len = bev.header.size - sizeof(bev); + if (read(input, filename, len) != len) + goto out; + + dso = dsos__findnew(filename); + if (dso != NULL) + dso__set_build_id(dso, &bev.build_id); + + offset += bev.header.size; + } + err = 0; +out: + return err; +} + +static struct thread *perf_session__register_idle_thread(struct perf_session *self) +{ + struct thread *thread = perf_session__findnew(self, 0); + + if (thread == NULL || thread__set_comm(thread, "swapper")) { + pr_err("problem inserting idle task.\n"); + thread = NULL; + } + + return thread; +} + +int perf_session__process_events(struct perf_session *self, + struct perf_event_ops *ops) +{ + int err; + unsigned long head, shift; + unsigned long offset = 0; + size_t page_size; + event_t *event; + uint32_t size; + char *buf; + + if (perf_session__register_idle_thread(self) == NULL) + return -ENOMEM; + + perf_event_ops__fill_defaults(ops); + + page_size = getpagesize(); + + head = self->header.data_offset; + self->sample_type = perf_header__sample_type(&self->header); + + err = -EINVAL; + if (ops->sample_type_check && ops->sample_type_check(self) < 0) + goto out_err; + + if (!ops->full_paths) { + char bf[PATH_MAX]; + + if (getcwd(bf, sizeof(bf)) == NULL) { + err = -errno; +out_getcwd_err: + pr_err("failed to get the current directory\n"); + goto out_err; + } + self->cwd = strdup(bf); + if (self->cwd == NULL) { + err = -ENOMEM; + goto out_getcwd_err; + } + self->cwdlen = strlen(self->cwd); + } + + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + +remap: + buf = mmap(NULL, page_size * self->mmap_window, PROT_READ, + MAP_SHARED, self->fd, offset); + if (buf == MAP_FAILED) { + pr_err("failed to mmap file\n"); + err = -errno; + goto out_err; + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (size == 0) + size = 8; + + if (head + event->header.size >= page_size * self->mmap_window) { + int munmap_ret; + + shift = page_size * (head 
/ page_size); + + munmap_ret = munmap(buf, page_size * self->mmap_window); + assert(munmap_ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dump_printf("\n%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (size == 0 || + perf_session__process_event(self, event, ops, offset, head) < 0) { + dump_printf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. + */ + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head >= self->header.data_offset + self->header.data_size) + goto done; + + if (offset + head < self->size) + goto more; +done: + err = 0; +out_err: + return err; +} -- cgit v1.2.3-58-ga151 From 4a58e61161074776aa34187ea369414ce4852394 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:00 -0200 Subject: perf tools: Move the map class definition to a separate header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And this resulted in the need for adding some missing includes in some places that were getting the definitions needed out of sheer luck. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1261957026-15580-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/debug.c | 1 + tools/perf/util/event.h | 65 ++----------------------------------- tools/perf/util/map.h | 73 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/probe-finder.h | 2 ++ 5 files changed, 80 insertions(+), 62 deletions(-) create mode 100644 tools/perf/util/map.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 4172c3b0e4a7..fafea0b6f323 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -357,6 +357,7 @@ LIB_H += util/event.h LIB_H += util/exec_cmd.h LIB_H += util/types.h LIB_H += util/levenshtein.h +LIB_H += util/map.h LIB_H += util/parse-options.h LIB_H += util/parse-events.h LIB_H += util/quote.h diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 28d520d5a1fb..0905600c3851 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -9,6 +9,7 @@ #include "color.h" #include "event.h" #include "debug.h" +#include "util.h" int verbose = 0; int dump_trace = 0; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 690a96d0467c..80fb3653c809 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -1,10 +1,10 @@ #ifndef __PERF_RECORD_H #define __PERF_RECORD_H +#include + #include "../perf.h" -#include "util.h" -#include -#include +#include "map.h" /* * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * @@ -101,67 +101,8 @@ struct events_stats { void event__print_totals(void); -enum map_type { - MAP__FUNCTION = 0, - MAP__VARIABLE, -}; - -#define MAP__NR_TYPES (MAP__VARIABLE + 1) - -struct map { - union { - struct rb_node rb_node; - struct list_head node; - }; - u64 start; - u64 end; - enum map_type type; - u64 pgoff; - u64 (*map_ip)(struct map *, u64); - u64 (*unmap_ip)(struct map *, u64); - struct dso *dso; -}; - -static inline u64 map__map_ip(struct map *map, u64 ip) -{ - return ip - map->start + map->pgoff; -} - -static inline u64 map__unmap_ip(struct map *map, 
u64 ip) -{ - return ip + map->start - map->pgoff; -} - -static inline u64 identity__map_ip(struct map *map __used, u64 ip) -{ - return ip; -} - -struct symbol; - -typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); - -void map__init(struct map *self, enum map_type type, - u64 start, u64 end, u64 pgoff, struct dso *dso); -struct map *map__new(struct mmap_event *event, enum map_type, - char *cwd, int cwdlen); -void map__delete(struct map *self); -struct map *map__clone(struct map *self); -int map__overlap(struct map *l, struct map *r); -size_t map__fprintf(struct map *self, FILE *fp); - struct perf_session; -int map__load(struct map *self, struct perf_session *session, - symbol_filter_t filter); -struct symbol *map__find_symbol(struct map *self, struct perf_session *session, - u64 addr, symbol_filter_t filter); -struct symbol *map__find_symbol_by_name(struct map *self, const char *name, - struct perf_session *session, - symbol_filter_t filter); -void map__fixup_start(struct map *self); -void map__fixup_end(struct map *self); - int event__synthesize_thread(pid_t pid, int (*process)(event_t *event, struct perf_session *session), diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h new file mode 100644 index 000000000000..72f0b6ab5ea5 --- /dev/null +++ b/tools/perf/util/map.h @@ -0,0 +1,73 @@ +#ifndef __PERF_MAP_H +#define __PERF_MAP_H + +#include +#include +#include +#include + +enum map_type { + MAP__FUNCTION = 0, + MAP__VARIABLE, +}; + +#define MAP__NR_TYPES (MAP__VARIABLE + 1) + +struct dso; + +struct map { + union { + struct rb_node rb_node; + struct list_head node; + }; + u64 start; + u64 end; + enum map_type type; + u64 pgoff; + u64 (*map_ip)(struct map *, u64); + u64 (*unmap_ip)(struct map *, u64); + struct dso *dso; +}; + +static inline u64 map__map_ip(struct map *map, u64 ip) +{ + return ip - map->start + map->pgoff; +} + +static inline u64 map__unmap_ip(struct map *map, u64 ip) +{ + return ip + map->start - map->pgoff; +} + +static inline u64 identity__map_ip(struct map *map __used, u64 ip) +{ + return ip; +} + +struct symbol; +struct mmap_event; + +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); + +void map__init(struct map *self, enum map_type type, + u64 start, u64 end, u64 pgoff, struct dso *dso); +struct map *map__new(struct mmap_event *event, enum map_type, + char *cwd, int cwdlen); +void map__delete(struct map *self); +struct map *map__clone(struct map *self); +int map__overlap(struct map *l, struct map *r); +size_t map__fprintf(struct map *self, FILE *fp); + +struct perf_session; + +int map__load(struct map *self, struct perf_session *session, + symbol_filter_t filter); +struct symbol *map__find_symbol(struct map *self, struct perf_session *session, + u64 addr, symbol_filter_t filter); +struct symbol *map__find_symbol_by_name(struct map *self, const char *name, + struct perf_session *session, + symbol_filter_t filter); +void map__fixup_start(struct map *self); +void map__fixup_end(struct map *self); + +#endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index a4086aaddb73..e3f396806e6e 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -1,6 +1,8 @@ #ifndef _PROBE_FINDER_H #define _PROBE_FINDER_H +#include "util.h" + #define MAX_PATH_LEN 256 #define MAX_PROBE_BUFFER 1024 #define MAX_PROBES 128 -- cgit v1.2.3-58-ga151 From 27295592c22e71bbd38110c302da8dbb43912a60 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:01 
-0200 Subject: perf session: Share the common trace sample_check routine as perf_session__has_traces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1261957026-15580-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 14 +------------- tools/perf/builtin-sched.c | 14 +------------- tools/perf/builtin-timechart.c | 13 +------------ tools/perf/builtin-trace.c | 14 +------------- tools/perf/util/session.c | 11 +++++++++++ tools/perf/util/session.h | 2 ++ 6 files changed, 17 insertions(+), 51 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index fc21ad79dd83..a85936f09f3e 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -342,22 +342,10 @@ static int process_sample_event(event_t *event, struct perf_session *session) return 0; } -static int sample_type_check(struct perf_session *session) -{ - if (!(session->sample_type & PERF_SAMPLE_RAW)) { - fprintf(stderr, - "No trace sample to read. Did you call perf record " - "without -R?"); - return -1; - } - - return 0; -} - static struct perf_event_ops event_ops = { .process_sample_event = process_sample_event, .process_comm_event = event__process_comm, - .sample_type_check = sample_type_check, + .sample_type_check = perf_session__has_traces, }; static double fragmentation(unsigned long n_req, unsigned long n_alloc) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 80209df6cfe8..d65098c42990 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1653,23 +1653,11 @@ static int process_lost_event(event_t *event __used, return 0; } -static int sample_type_check(struct perf_session *session __used) -{ - if (!(session->sample_type & PERF_SAMPLE_RAW)) { - fprintf(stderr, - "No trace sample to read. 
Did you call perf record " - "without -R?"); - return -1; - } - - return 0; -} - static struct perf_event_ops event_ops = { .process_sample_event = process_sample_event, .process_comm_event = event__process_comm, .process_lost_event = process_lost_event, - .sample_type_check = sample_type_check, + .sample_type_check = perf_session__has_traces, }; static int read_events(void) diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index a589a43112d6..b42f337c17d9 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1029,23 +1029,12 @@ static void process_samples(struct perf_session *session) } } -static int sample_type_check(struct perf_session *session) -{ - if (!(session->sample_type & PERF_SAMPLE_RAW)) { - fprintf(stderr, "No trace samples found in the file.\n" - "Have you used 'perf timechart record' to record it?\n"); - return -1; - } - - return 0; -} - static struct perf_event_ops event_ops = { .process_comm_event = process_comm_event, .process_fork_event = process_fork_event, .process_exit_event = process_exit_event, .process_sample_event = queue_sample_event, - .sample_type_check = sample_type_check, + .sample_type_check = perf_session__has_traces, }; static int __cmd_timechart(void) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 574a215e800b..b0ba2ac37e2c 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -103,22 +103,10 @@ static int process_sample_event(event_t *event, struct perf_session *session) return 0; } -static int sample_type_check(struct perf_session *session) -{ - if (!(session->sample_type & PERF_SAMPLE_RAW)) { - fprintf(stderr, - "No trace sample to read. Did you call perf record " - "without -R?"); - return -1; - } - - return 0; -} - static struct perf_event_ops event_ops = { .process_sample_event = process_sample_event, .process_comm_event = event__process_comm, - .sample_type_check = sample_type_check, + .sample_type_check = perf_session__has_traces, }; static int __cmd_trace(struct perf_session *session) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 736d4fda9272..60eab8b3ff34 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -393,3 +393,14 @@ done: out_err: return err; } + +int perf_session__has_traces(struct perf_session *self) +{ + if (!(self->sample_type & PERF_SAMPLE_RAW)) { + pr_err("No trace sample to read. Did you call perf record " + "without -R?"); + return -1; + } + + return 0; +} diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 32eaa1bada06..a6951d2f700f 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -56,6 +56,8 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self, struct ip_callchain *chain, struct symbol **parent); +int perf_session__has_traces(struct perf_session *self); + int perf_header__read_build_ids(int input, u64 offset, u64 file_size); #endif /* __PERF_SESSION_H */ -- cgit v1.2.3-58-ga151 From d549c7690190d9739005e19604faad6da4b802ac Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:02 -0200 Subject: perf session: Remove sample_type_check from event_ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is really something tools need to do before asking for the events to be processed, leaving perf_session__process_events to do just that, process events. 
Also add a msg parameter to perf_session__has_traces() so that the
right message can be printed, fixing a regression added by me in the
previous cset (right timechart message) and also fixing 'perf kmem',
which was not asking whether 'perf kmem record' had been run.

Signed-off-by: Arnaldo Carvalho de Melo
Cc: Frédéric Weisbecker
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Paul Mackerras
LKML-Reference: <1261957026-15580-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar
---
 tools/perf/builtin-kmem.c      |  6 ++++--
 tools/perf/builtin-report.c    | 16 +++++++++-------
 tools/perf/builtin-sched.c     |  7 ++++---
 tools/perf/builtin-timechart.c |  6 ++++--
 tools/perf/builtin-trace.c     |  4 +++-
 tools/perf/util/session.c      | 16 ++++++----------
 tools/perf/util/session.h      |  3 +--
 7 files changed, 31 insertions(+), 27 deletions(-)

diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index a85936f09f3e..73b065022e27 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -345,7 +345,6 @@ static int process_sample_event(event_t *event, struct perf_session *session)
 static struct perf_event_ops event_ops = {
 	.process_sample_event	= process_sample_event,
 	.process_comm_event	= event__process_comm,
-	.sample_type_check	= perf_session__has_traces,
 };
 
 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -492,11 +491,14 @@ static void sort_result(void)
 
 static int __cmd_kmem(void)
 {
-	int err;
+	int err = -EINVAL;
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
 	if (session == NULL)
 		return -ENOMEM;
 
+	if (!perf_session__has_traces(session, "kmem record"))
+		goto out_delete;
+
 	setup_pager();
 	err = perf_session__process_events(session, &event_ops);
 	if (err != 0)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index db10c0e8ecae..08259184cedb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -156,14 +156,14 @@ static int process_read_event(event_t *event, struct perf_session *session __use
 	return 0;
 }
 
-static int sample_type_check(struct perf_session *session)
+static int perf_session__setup_sample_type(struct perf_session *self)
 {
-	if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+	if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
 		if (sort__has_parent) {
 			fprintf(stderr, "selected --sort parent, but no"
 					" callchain data. Did you call"
 					" perf record without -g?\n");
-			return -1;
+			return -EINVAL;
 		}
 		if (symbol_conf.use_callchain) {
 			fprintf(stderr, "selected -g but no callchain data."
@@ -176,7 +176,7 @@ static int sample_type_check(struct perf_session *session) if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain" " params\n"); - return -1; + return -EINVAL; } } @@ -191,13 +191,11 @@ static struct perf_event_ops event_ops = { .process_fork_event = event__process_task, .process_lost_event = event__process_lost, .process_read_event = process_read_event, - .sample_type_check = sample_type_check, }; - static int __cmd_report(void) { - int ret; + int ret = -EINVAL; struct perf_session *session; session = perf_session__new(input_name, O_RDONLY, force); @@ -207,6 +205,10 @@ static int __cmd_report(void) if (show_threads) perf_read_values_init(&show_threads_values); + ret = perf_session__setup_sample_type(session); + if (ret) + goto out_delete; + ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index d65098c42990..e862e71f4e68 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1657,17 +1657,18 @@ static struct perf_event_ops event_ops = { .process_sample_event = process_sample_event, .process_comm_event = event__process_comm, .process_lost_event = process_lost_event, - .sample_type_check = perf_session__has_traces, }; static int read_events(void) { - int err; + int err = -EINVAL; struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); if (session == NULL) return -ENOMEM; - err = perf_session__process_events(session, &event_ops); + if (perf_session__has_traces(session, "record -R")) + err = perf_session__process_events(session, &event_ops); + perf_session__delete(session); return err; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index b42f337c17d9..825283794985 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1034,17 +1034,19 @@ static struct perf_event_ops event_ops = { .process_fork_event = process_fork_event, .process_exit_event = process_exit_event, .process_sample_event = queue_sample_event, - .sample_type_check = perf_session__has_traces, }; static int __cmd_timechart(void) { struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); - int ret; + int ret = -EINVAL; if (session == NULL) return -ENOMEM; + if (!perf_session__has_traces(session, "timechart record")) + goto out_delete; + ret = perf_session__process_events(session, &event_ops); if (ret) goto out_delete; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index b0ba2ac37e2c..e94f34631585 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -106,7 +106,6 @@ static int process_sample_event(event_t *event, struct perf_session *session) static struct perf_event_ops event_ops = { .process_sample_event = process_sample_event, .process_comm_event = event__process_comm, - .sample_type_check = perf_session__has_traces, }; static int __cmd_trace(struct perf_session *session) @@ -580,6 +579,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) if (session == NULL) return -ENOMEM; + if (!perf_session__has_traces(session, "record -R")) + return -EINVAL; + if (generate_script_lang) { struct stat perf_stat; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 60eab8b3ff34..bc84a5217955 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -73,6 +73,8 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc if (mode == O_RDONLY && 
perf_session__open(self, force) < 0) goto out_delete; + + self->sample_type = perf_header__sample_type(&self->header); out: return self; out_free: @@ -302,11 +304,6 @@ int perf_session__process_events(struct perf_session *self, page_size = getpagesize(); head = self->header.data_offset; - self->sample_type = perf_header__sample_type(&self->header); - - err = -EINVAL; - if (ops->sample_type_check && ops->sample_type_check(self) < 0) - goto out_err; if (!ops->full_paths) { char bf[PATH_MAX]; @@ -394,13 +391,12 @@ out_err: return err; } -int perf_session__has_traces(struct perf_session *self) +bool perf_session__has_traces(struct perf_session *self, const char *msg) { if (!(self->sample_type & PERF_SAMPLE_RAW)) { - pr_err("No trace sample to read. Did you call perf record " - "without -R?"); - return -1; + pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); + return false; } - return 0; + return true; } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index a6951d2f700f..5771ccb3fe03 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -40,7 +40,6 @@ struct perf_event_ops { event_op process_read_event; event_op process_throttle_event; event_op process_unthrottle_event; - int (*sample_type_check)(struct perf_session *session); unsigned long total_unknown; bool full_paths; }; @@ -56,7 +55,7 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self, struct ip_callchain *chain, struct symbol **parent); -int perf_session__has_traces(struct perf_session *self); +bool perf_session__has_traces(struct perf_session *self, const char *msg); int perf_header__read_build_ids(int input, u64 offset, u64 file_size); -- cgit v1.2.3-58-ga151 From 31d337c4ee3152b7271897eae576251643f5a3b5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:03 -0200 Subject: perf session: Move total_unknown to perf_session->unknown events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As this is a session property, not belonging to perf_event_ops, that can be shared by many perf_session instances. 
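(Editor's illustration, not part of the commit above: a minimal, hypothetical sketch of why the counter belongs on the session. One perf_event_ops table, often statically defined, can drive several perf_session instances, so a per-session counter keeps each file's count of unrecognized records separate. The handler process_sample_event, the function name and the two file names below are assumptions made for the example; NULL checks are omitted for brevity.)

/* assumes the perf tool sources: util/session.h, util/debug.h, fcntl.h */
static struct perf_event_ops event_ops = {
	.process_sample_event = process_sample_event,	/* hypothetical tool handler */
};

static void compare_two_data_files(void)
{
	struct perf_session *old = perf_session__new("perf.data.old", O_RDONLY, 0);
	struct perf_session *new = perf_session__new("perf.data", O_RDONLY, 0);

	perf_session__process_events(old, &event_ops);	/* same shared ops table ... */
	perf_session__process_events(new, &event_ops);	/* ... used for both sessions */

	/* each session now carries its own count of unrecognized records */
	pr_info("unknown events: %lu (old) vs %lu (new)\n",
		old->unknown_events, new->unknown_events);

	perf_session__delete(old);
	perf_session__delete(new);
}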
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1261957026-15580-7-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/session.c | 3 ++- tools/perf/util/session.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index bc84a5217955..4ca427f73994 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -66,6 +66,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc self->mmap_window = 32; self->cwd = NULL; self->cwdlen = 0; + self->unknown_events = 0; map_groups__init(&self->kmaps); if (perf_session__create_kernel_maps(self) < 0) @@ -239,7 +240,7 @@ static int perf_session__process_event(struct perf_session *self, case PERF_RECORD_UNTHROTTLE: return ops->process_unthrottle_event(event, self); default: - ops->total_unknown++; + self->unknown_events++; return -1; } } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 5771ccb3fe03..585937b6f9ee 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -20,6 +20,7 @@ struct perf_session { struct thread *last_match; struct events_stats events_stats; unsigned long event_total[PERF_RECORD_MAX]; + unsigned long unknown_events; struct rb_root hists; u64 sample_type; int fd; @@ -40,7 +41,6 @@ struct perf_event_ops { event_op process_read_event; event_op process_throttle_event; event_op process_unthrottle_event; - unsigned long total_unknown; bool full_paths; }; -- cgit v1.2.3-58-ga151 From f7d87444e6ee6f4a19634e5412664c1c529a2370 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:04 -0200 Subject: perf session: Move full_paths config to symbol_conf MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now perf_event_ops has just that, event handlers. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1261957026-15580-8-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-diff.c | 2 +- tools/perf/builtin-report.c | 2 +- tools/perf/util/session.c | 2 +- tools/perf/util/session.h | 1 - tools/perf/util/symbol.h | 3 ++- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index bd71b8ceafb7..e164b3d45cd4 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -204,7 +204,7 @@ static const struct option options[] = { OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), - OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, + OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths, "Don't shorten the pathnames taking into account the cwd"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 08259184cedb..f695084910c0 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -321,7 +321,7 @@ static const struct option options[] = { "pretty printing style key: normal raw"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), - OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, + OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths, "Don't shorten the pathnames taking into account the cwd"), OPT_STRING('p', "parent", &parent_pattern, "regex", "regex filter to identify parent, see: '--sort parent'"), diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 4ca427f73994..4f2eeb584da8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -306,7 +306,7 @@ int perf_session__process_events(struct perf_session *self, head = self->header.data_offset; - if (!ops->full_paths) { + if (!symbol_conf.full_paths) { char bf[PATH_MAX]; if (getcwd(bf, sizeof(bf)) == NULL) { diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 585937b6f9ee..2ff77fea06ef 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -41,7 +41,6 @@ struct perf_event_ops { event_op process_read_event; event_op process_throttle_event; event_op process_unthrottle_event; - bool full_paths; }; struct perf_session *perf_session__new(const char *filename, int mode, bool force); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 8aded2356f79..9eabd60f819d 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -58,7 +58,8 @@ struct symbol_conf { sort_by_name, show_nr_samples, use_callchain, - exclude_other; + exclude_other, + full_paths; const char *vmlinux_name, *field_sep; char *dso_list_str, -- cgit v1.2.3-58-ga151 From 55aa640f54280da25046acd2075842d464f451e6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 27 Dec 2009 21:37:05 -0200 Subject: perf session: Remove redundant prefix & suffix from perf_event_ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since now all that we have are perf event handlers, leave just the name of the event. 
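(Editor's note, illustrative only and not part of the commit above: the effect of this rename on a tool's handler table, using handlers that the builtins in the diffs below already register; any tool-specific handler such as process_sample_event is assumed to exist.)

Before:

	static struct perf_event_ops event_ops = {
		.process_sample_event	= process_sample_event,
		.process_comm_event	= event__process_comm,
		.process_lost_event	= event__process_lost,
	};

After, with the redundant "process_" prefix and "_event" suffix dropped:

	static struct perf_event_ops event_ops = {
		.sample	= process_sample_event,
		.comm	= event__process_comm,
		.lost	= event__process_lost,
	};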
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1261957026-15580-9-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 8 +++---- tools/perf/builtin-diff.c | 12 +++++----- tools/perf/builtin-kmem.c | 4 ++-- tools/perf/builtin-report.c | 14 +++++------ tools/perf/builtin-sched.c | 6 ++--- tools/perf/builtin-timechart.c | 8 +++---- tools/perf/builtin-trace.c | 4 ++-- tools/perf/util/session.c | 54 +++++++++++++++++++++--------------------- tools/perf/util/session.h | 18 +++++++------- 9 files changed, 64 insertions(+), 64 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 593ff25006de..117bbae844bf 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -451,10 +451,10 @@ static void perf_session__find_annotations(struct perf_session *self) } static struct perf_event_ops event_ops = { - .process_sample_event = process_sample_event, - .process_mmap_event = event__process_mmap, - .process_comm_event = event__process_comm, - .process_fork_event = event__process_task, + .sample = process_sample_event, + .mmap = event__process_mmap, + .comm = event__process_comm, + .fork = event__process_task, }; static int __cmd_annotate(void) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index e164b3d45cd4..1cbecaf029fa 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -66,12 +66,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi } static struct perf_event_ops event_ops = { - .process_sample_event = diff__process_sample_event, - .process_mmap_event = event__process_mmap, - .process_comm_event = event__process_comm, - .process_exit_event = event__process_task, - .process_fork_event = event__process_task, - .process_lost_event = event__process_lost, + .sample = diff__process_sample_event, + .mmap = event__process_mmap, + .comm = event__process_comm, + .exit = event__process_task, + .fork = event__process_task, + .lost = event__process_lost, }; static void perf_session__insert_hist_entry_by_name(struct rb_root *root, diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 73b065022e27..4c06828fe39d 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -343,8 +343,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) } static struct perf_event_ops event_ops = { - .process_sample_event = process_sample_event, - .process_comm_event = event__process_comm, + .sample = process_sample_event, + .comm = event__process_comm, }; static double fragmentation(unsigned long n_req, unsigned long n_alloc) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f695084910c0..508934b0140a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -184,13 +184,13 @@ static int perf_session__setup_sample_type(struct perf_session *self) } static struct perf_event_ops event_ops = { - .process_sample_event = process_sample_event, - .process_mmap_event = event__process_mmap, - .process_comm_event = event__process_comm, - .process_exit_event = event__process_task, - .process_fork_event = event__process_task, - .process_lost_event = event__process_lost, - .process_read_event = process_read_event, + .sample = process_sample_event, + .mmap = event__process_mmap, + .comm = event__process_comm, + .exit = event__process_task, + .fork = event__process_task, + 
.lost = event__process_lost, + .read = process_read_event, }; static int __cmd_report(void) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index e862e71f4e68..702322f8fec1 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1654,9 +1654,9 @@ static int process_lost_event(event_t *event __used, } static struct perf_event_ops event_ops = { - .process_sample_event = process_sample_event, - .process_comm_event = event__process_comm, - .process_lost_event = process_lost_event, + .sample = process_sample_event, + .comm = event__process_comm, + .lost = process_lost_event, }; static int read_events(void) diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 825283794985..5b68d81d93a1 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1030,10 +1030,10 @@ static void process_samples(struct perf_session *session) } static struct perf_event_ops event_ops = { - .process_comm_event = process_comm_event, - .process_fork_event = process_fork_event, - .process_exit_event = process_exit_event, - .process_sample_event = queue_sample_event, + .comm = process_comm_event, + .fork = process_fork_event, + .exit = process_exit_event, + .sample = queue_sample_event, }; static int __cmd_timechart(void) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e94f34631585..1831434aa938 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -104,8 +104,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) } static struct perf_event_ops event_ops = { - .process_sample_event = process_sample_event, - .process_comm_event = event__process_comm, + .sample = process_sample_event, + .comm = event__process_comm, }; static int __cmd_trace(struct perf_session *session) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 4f2eeb584da8..7f0537d1add8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -161,24 +161,24 @@ static int process_event_stub(event_t *event __used, static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) { - if (handler->process_sample_event == NULL) - handler->process_sample_event = process_event_stub; - if (handler->process_mmap_event == NULL) - handler->process_mmap_event = process_event_stub; - if (handler->process_comm_event == NULL) - handler->process_comm_event = process_event_stub; - if (handler->process_fork_event == NULL) - handler->process_fork_event = process_event_stub; - if (handler->process_exit_event == NULL) - handler->process_exit_event = process_event_stub; - if (handler->process_lost_event == NULL) - handler->process_lost_event = process_event_stub; - if (handler->process_read_event == NULL) - handler->process_read_event = process_event_stub; - if (handler->process_throttle_event == NULL) - handler->process_throttle_event = process_event_stub; - if (handler->process_unthrottle_event == NULL) - handler->process_unthrottle_event = process_event_stub; + if (handler->sample == NULL) + handler->sample = process_event_stub; + if (handler->mmap == NULL) + handler->mmap = process_event_stub; + if (handler->comm == NULL) + handler->comm = process_event_stub; + if (handler->fork == NULL) + handler->fork = process_event_stub; + if (handler->exit == NULL) + handler->exit = process_event_stub; + if (handler->lost == NULL) + handler->lost = process_event_stub; + if (handler->read == NULL) + handler->read = process_event_stub; + if (handler->throttle == NULL) + 
handler->throttle = process_event_stub;
+	if (handler->unthrottle == NULL)
+		handler->unthrottle = process_event_stub;
 }
 
 static const char *event__name[] = {
@@ -222,23 +222,23 @@ static int perf_session__process_event(struct perf_session *self,
 
 	switch (event->header.type) {
 	case PERF_RECORD_SAMPLE:
-		return ops->process_sample_event(event, self);
+		return ops->sample(event, self);
 	case PERF_RECORD_MMAP:
-		return ops->process_mmap_event(event, self);
+		return ops->mmap(event, self);
 	case PERF_RECORD_COMM:
-		return ops->process_comm_event(event, self);
+		return ops->comm(event, self);
 	case PERF_RECORD_FORK:
-		return ops->process_fork_event(event, self);
+		return ops->fork(event, self);
 	case PERF_RECORD_EXIT:
-		return ops->process_exit_event(event, self);
+		return ops->exit(event, self);
 	case PERF_RECORD_LOST:
-		return ops->process_lost_event(event, self);
+		return ops->lost(event, self);
 	case PERF_RECORD_READ:
-		return ops->process_read_event(event, self);
+		return ops->read(event, self);
 	case PERF_RECORD_THROTTLE:
-		return ops->process_throttle_event(event, self);
+		return ops->throttle(event, self);
 	case PERF_RECORD_UNTHROTTLE:
-		return ops->process_unthrottle_event(event, self);
+		return ops->unthrottle(event, self);
 	default:
 		self->unknown_events++;
 		return -1;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 2ff77fea06ef..77c5ee2993c2 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -32,15 +32,15 @@ struct perf_session {
 typedef int (*event_op)(event_t *self, struct perf_session *session);
 
 struct perf_event_ops {
-	event_op process_sample_event;
-	event_op process_mmap_event;
-	event_op process_comm_event;
-	event_op process_fork_event;
-	event_op process_exit_event;
-	event_op process_lost_event;
-	event_op process_read_event;
-	event_op process_throttle_event;
-	event_op process_unthrottle_event;
+	event_op	sample,
+			mmap,
+			comm,
+			fork,
+			exit,
+			lost,
+			read,
+			throttle,
+			unthrottle;
 };
 
 struct perf_session *perf_session__new(const char *filename, int mode, bool force);
-- cgit v1.2.3-58-ga151


From 4cf40131a5cf4918e83b3756e58a1fc9e984f8ef Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Sun, 27 Dec 2009 21:37:06 -0200
Subject: perf record: Introduce a symtab cache
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now a cache will be created in a ~/.debug debuginfo like hierarchy, so
that at the end of a 'perf record' session all the binaries (with
build-ids) involved get collected and indexed by their build-ids, so
that perf report can find them.

This is interesting when developing software where you want to do a
'perf diff' with the previous build, and opens avenues for lots more
interesting tools, like a 'perf diff --graph' that takes more than two
binaries into account.

Tunables for collecting just the symtabs can be added if one doesn't
want to have the full binary, but having the full binary allows things
like 'perf rerecord' or other tools that can re-run the tests by having
access to the exact binary in some perf.data file, so it may well be
interesting to keep the full binary there.

Space consumption is minimised by trying to use hard links; a
'perf cache' tool to manage the space used, à la ccache, is required to
purge older entries.

With this in place it will be possible also to introduce new commands,
'perf archive' and 'perf restore' (or some more suitable and future
proof names) to create a cpio/tar file with the perf data and the files
in the cache that _had_ perf hits of interest.

There are more aspects to polish, like finding the right vmlinux file
to cache, etc, but this is enough for a first step.

Signed-off-by: Arnaldo Carvalho de Melo
Cc: Frédéric Weisbecker
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Paul Mackerras
LKML-Reference: <1261957026-15580-10-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar
---
 tools/perf/Makefile      |  1 +
 tools/perf/util/header.c | 82 +++++++++++++++++++++++++++++++++++++++++++++---
 tools/perf/util/symbol.c | 17 +++++++---
 tools/perf/util/symbol.h |  2 ++
 tools/perf/util/util.c   | 69 ++++++++++++++++++++++++++++++++++++++++
 tools/perf/util/util.h   |  3 ++
 6 files changed, 165 insertions(+), 9 deletions(-)
 create mode 100644 tools/perf/util/util.c

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index fafea0b6f323..7c846424aebf 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -425,6 +425,7 @@ LIB_OBJS += util/svghelper.o
 LIB_OBJS += util/sort.o
 LIB_OBJS += util/hist.o
 LIB_OBJS += util/probe-event.o
+LIB_OBJS += util/util.o
 
 BUILTIN_OBJS += builtin-annotate.o
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 8a0bca55106f..df237c3a041b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -169,20 +169,23 @@ static int do_write(int fd, const void *buf, size_t size)
 	return 0;
 }
 
+#define dsos__for_each_with_build_id(pos, head)	\
+	list_for_each_entry(pos, head, node)		\
+		if (!pos->has_build_id)			\
+			continue;			\
+		else
+
 static int __dsos__write_buildid_table(struct list_head *head, int fd)
 {
 #define NAME_ALIGN 64
 	struct dso *pos;
 	static const char zero_buf[NAME_ALIGN];
 
-	list_for_each_entry(pos, head, node) {
+	dsos__for_each_with_build_id(pos, head) {
 		int err;
 		struct build_id_event b;
-		size_t len;
+		size_t len = pos->long_name_len + 1;
 
-		if (!pos->has_build_id)
-			continue;
-
-		len = pos->long_name_len + 1;
 		len = ALIGN(len, NAME_ALIGN);
 		memset(&b, 0, sizeof(b));
 		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
@@ -209,6 +212,74 @@ static int dsos__write_buildid_table(int fd)
 	return err;
 }
 
+static int dso__cache_build_id(struct dso *self, const char *debugdir)
+{
+	const size_t size = PATH_MAX;
+	char *filename = malloc(size),
+	     *linkname = malloc(size), *targetname, *sbuild_id;
+	int len, err = -1;
+
+	if (filename == NULL || linkname == NULL)
+		goto out_free;
+
+	len = snprintf(filename, size, "%s%s", debugdir, self->long_name);
+	if (mkdir_p(filename, 0755))
+		goto out_free;
+
+	len += snprintf(filename + len, sizeof(filename) - len, "/");
+	sbuild_id = filename + len;
+	build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
+
+	if (access(filename, F_OK) && link(self->long_name, filename) &&
+	    copyfile(self->long_name, filename))
+		goto out_free;
+
+	len = snprintf(linkname, size, "%s/.build-id/%.2s",
+		       debugdir, sbuild_id);
+
+	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+		goto out_free;
+
+	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+	targetname = filename + strlen(debugdir) - 5;
+	memcpy(targetname, "../..", 5);
+
+	if (symlink(targetname, linkname) == 0)
+		err = 0;
+out_free:
+	free(filename);
+	free(linkname);
+	return err;
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+	struct dso *pos;
+	int err = 0;
+
+
dsos__for_each_with_build_id(pos, head) + if (dso__cache_build_id(pos, debugdir)) + err = -1; + + return err; +} + +static int dsos__cache_build_ids(void) +{ + int err_kernel, err_user; + char debugdir[PATH_MAX]; + + snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"), + DEBUG_CACHE_DIR); + + if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) + return -1; + + err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir); + err_user = __dsos__cache_build_ids(&dsos__user, debugdir); + return err_kernel || err_user ? -1 : 0; +} + static int perf_header__adds_write(struct perf_header *self, int fd) { int nr_sections; @@ -258,6 +329,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) goto out_free; } buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; + dsos__cache_build_ids(); } lseek(fd, sec_start, SEEK_SET); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index ab92763edb03..79ca6a099f96 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -22,6 +22,7 @@ enum dso_origin { DSO__ORIG_KERNEL = 0, DSO__ORIG_JAVA_JIT, + DSO__ORIG_BUILD_ID_CACHE, DSO__ORIG_FEDORA, DSO__ORIG_UBUNTU, DSO__ORIG_BUILDID, @@ -1191,6 +1192,7 @@ char dso__symtab_origin(const struct dso *self) static const char origin[] = { [DSO__ORIG_KERNEL] = 'k', [DSO__ORIG_JAVA_JIT] = 'j', + [DSO__ORIG_BUILD_ID_CACHE] = 'B', [DSO__ORIG_FEDORA] = 'f', [DSO__ORIG_UBUNTU] = 'u', [DSO__ORIG_BUILDID] = 'b', @@ -1209,6 +1211,7 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session, int size = PATH_MAX; char *name; u8 build_id[BUILD_ID_SIZE]; + char build_id_hex[BUILD_ID_SIZE * 2 + 1]; int ret = -1; int fd; @@ -1230,8 +1233,16 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session, return ret; } - self->origin = DSO__ORIG_FEDORA - 1; + self->origin = DSO__ORIG_BUILD_ID_CACHE; + if (self->has_build_id) { + build_id__sprintf(self->build_id, sizeof(self->build_id), + build_id_hex); + snprintf(name, size, "%s/%s/.build-id/%.2s/%s", + getenv("HOME"), DEBUG_CACHE_DIR, + build_id_hex, build_id_hex + 2); + goto open_file; + } more: do { self->origin++; @@ -1247,8 +1258,6 @@ more: case DSO__ORIG_BUILDID: if (filename__read_build_id(self->long_name, build_id, sizeof(build_id))) { - char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(build_id, sizeof(build_id), build_id_hex); snprintf(name, size, @@ -1276,7 +1285,7 @@ compare_build_id: if (!dso__build_id_equal(self, build_id)) goto more; } - +open_file: fd = open(name, O_RDONLY); } while (fd < 0); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 9eabd60f819d..f27e158943e9 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -8,6 +8,8 @@ #include #include "event.h" +#define DEBUG_CACHE_DIR ".debug" + #ifdef HAVE_CPLUS_DEMANGLE extern char *cplus_demangle(const char *, int); diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c new file mode 100644 index 000000000000..f3c0798a5e78 --- /dev/null +++ b/tools/perf/util/util.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include +#include +#include "util.h" + +int mkdir_p(char *path, mode_t mode) +{ + struct stat st; + int err; + char *d = path; + + if (*d != '/') + return -1; + + if (stat(path, &st) == 0) + return 0; + + while (*++d == '/'); + + while ((d = strchr(d, '/'))) { + *d = '\0'; + err = stat(path, &st) && mkdir(path, mode); + *d++ = '/'; + if (err) + return -1; + while (*d == '/') + ++d; + } + return (stat(path, &st) && mkdir(path, mode)) ? 
-1 : 0; +} + +int copyfile(const char *from, const char *to) +{ + int fromfd, tofd; + struct stat st; + void *addr; + int err = -1; + + if (stat(from, &st)) + goto out; + + fromfd = open(from, O_RDONLY); + if (fromfd < 0) + goto out; + + tofd = creat(to, 0755); + if (tofd < 0) + goto out_close_from; + + addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0); + if (addr == MAP_FAILED) + goto out_close_to; + + if (write(tofd, addr, st.st_size) == st.st_size) + err = 0; + + munmap(addr, st.st_size); +out_close_to: + close(tofd); + if (err) + unlink(to); +out_close_from: + close(fromfd); +out: + return err; +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index c673d8825883..0f5b2a6f1080 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -403,4 +403,7 @@ void git_qsort(void *base, size_t nmemb, size_t size, #endif #endif +int mkdir_p(char *path, mode_t mode); +int copyfile(const char *from, const char *to); + #endif -- cgit v1.2.3-58-ga151 From 49f474331e563a6ecf3b1e87ec27ec5482b3e4f1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 27 Dec 2009 11:51:52 +0100 Subject: perf events: Remove arg from perf sched hooks Since we only ever schedule the local cpu, there is no need to pass the cpu number to the perf sched hooks. This micro-optimizes things a bit. Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 12 ++++++------ kernel/perf_event.c | 27 ++++++++++++++------------- kernel/sched.c | 6 +++--- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index c66b34f75eea..a494e7501292 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -746,10 +746,10 @@ extern int perf_max_events; extern const struct pmu *hw_perf_event_init(struct perf_event *event); -extern void perf_event_task_sched_in(struct task_struct *task, int cpu); +extern void perf_event_task_sched_in(struct task_struct *task); extern void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu); -extern void perf_event_task_tick(struct task_struct *task, int cpu); + struct task_struct *next); +extern void perf_event_task_tick(struct task_struct *task); extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); @@ -870,12 +870,12 @@ extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); #else static inline void -perf_event_task_sched_in(struct task_struct *task, int cpu) { } +perf_event_task_sched_in(struct task_struct *task) { } static inline void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu) { } + struct task_struct *next) { } static inline void -perf_event_task_tick(struct task_struct *task, int cpu) { } +perf_event_task_tick(struct task_struct *task) { } static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 03cc061398d1..099bd662daa6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, 
* not restart the event. */ void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu) + struct task_struct *next) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = task->perf_event_ctxp; struct perf_event_context *next_ctx; struct perf_event_context *parent; @@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) static void __perf_event_sched_in(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx, int cpu) + struct perf_cpu_context *cpuctx) { + int cpu = smp_processor_id(); struct perf_event *event; int can_add_hw = 1; @@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx, * accessing the event control register. If a NMI hits, then it will * keep the event running. */ -void perf_event_task_sched_in(struct task_struct *task, int cpu) +void perf_event_task_sched_in(struct task_struct *task) { - struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event_context *ctx = task->perf_event_ctxp; if (likely(!ctx)) return; if (cpuctx->task_ctx == ctx) return; - __perf_event_sched_in(ctx, cpuctx, cpu); + __perf_event_sched_in(ctx, cpuctx); cpuctx->task_ctx = ctx; } -static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) +static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx) { struct perf_event_context *ctx = &cpuctx->ctx; - __perf_event_sched_in(ctx, cpuctx, cpu); + __perf_event_sched_in(ctx, cpuctx); } #define MAX_INTERRUPTS (~0ULL) @@ -1461,7 +1462,7 @@ static void rotate_ctx(struct perf_event_context *ctx) raw_spin_unlock(&ctx->lock); } -void perf_event_task_tick(struct task_struct *curr, int cpu) +void perf_event_task_tick(struct task_struct *curr) { struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; @@ -1469,7 +1470,7 @@ void perf_event_task_tick(struct task_struct *curr, int cpu) if (!atomic_read(&nr_events)) return; - cpuctx = &per_cpu(perf_cpu_context, cpu); + cpuctx = &__get_cpu_var(perf_cpu_context); ctx = curr->perf_event_ctxp; perf_ctx_adjust_freq(&cpuctx->ctx); @@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu) if (ctx) rotate_ctx(ctx); - perf_event_cpu_sched_in(cpuctx, cpu); + perf_event_cpu_sched_in(cpuctx); if (ctx) - perf_event_task_sched_in(curr, cpu); + perf_event_task_sched_in(curr); } /* @@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) raw_spin_unlock(&ctx->lock); - perf_event_task_sched_in(task, smp_processor_id()); + perf_event_task_sched_in(task); out: local_irq_restore(flags); } diff --git a/kernel/sched.c b/kernel/sched.c index 18cceeecce35..d6527ac0f6e7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2752,7 +2752,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) */ prev_state = prev->state; finish_arch_switch(prev); - perf_event_task_sched_in(current, cpu_of(rq)); + perf_event_task_sched_in(current); finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); @@ -5266,7 +5266,7 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); raw_spin_unlock(&rq->lock); - perf_event_task_tick(curr, cpu); + perf_event_task_tick(curr); #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); @@ -5480,7 +5480,7 @@ need_resched_nonpreemptible: if (likely(prev != next)) { 
sched_info_switch(prev, next); - perf_event_task_sched_out(prev, next, cpu); + perf_event_task_sched_out(prev, next); rq->nr_switches++; rq->curr = next; -- cgit v1.2.3-58-ga151 From fd2a50a0240f5f5b59070474eabd83a85720a406 Mon Sep 17 00:00:00 2001 From: Naga Chumbalkar Date: Thu, 24 Dec 2009 01:54:47 +0000 Subject: x86, perfctr: Remove unused func avail_to_resrv_perfctr_nmi() avail_to_resrv_perfctr_nmi() is neither EXPORT'd, nor used in the file. So remove it. Signed-off-by: Naga Chumbalkar Acked-by: Cyrill Gorcunov Cc: oprofile-list@lists.sf.net LKML-Reference: <20091224015441.6005.4408.sendpatchset@localhost.localdomain> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/nmi.h | 1 - arch/x86/kernel/cpu/perfctr-watchdog.c | 11 ----------- 2 files changed, 12 deletions(-) diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 139d4c1a33a7..93da9c3f3341 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); extern int check_nmi_watchdog(void); extern int nmi_watchdog_enabled; extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); extern int reserve_perfctr_nmi(unsigned int); extern void release_perfctr_nmi(unsigned int); extern int reserve_evntsel_nmi(unsigned int); diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 898df9719afb..74f4e85a5727 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) return !test_bit(counter, perfctr_nmi_owner); } - -/* checks the an msr for availability */ -int avail_to_resrv_perfctr_nmi(unsigned int msr) -{ - unsigned int counter; - - counter = nmi_perfctr_msr_to_bit(msr); - BUG_ON(counter > NMI_MAX_COUNTER_BITS); - - return !test_bit(counter, perfctr_nmi_owner); -} EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); int reserve_perfctr_nmi(unsigned int msr) -- cgit v1.2.3-58-ga151 From 659d8cfbb225f1fa5a4f8671a847ef3ab5a89660 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Sat, 19 Dec 2009 16:40:28 -0500 Subject: perf tools: Do a few more directory handling optimizations A few more optimizations for perf when dealing with directories. Some of them significantly cut down the work which has to be done. d_type should always be set; otherwise fix the kernel code. And there are functions available to parse fstab-like files, so use them. 
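(Illustrative sketch only, not part of the patch: the snippet below shows the setmntent()/getmntent() style of /proc/mounts parsing that the description above refers to; the helper name find_debugfs_mountpoint() and the buffer size are invented for the example, and the real change is in the diff that follows.)

#include <mntent.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-alone example: locate the debugfs mount point
 * by parsing /proc/mounts with the mntent API instead of fscanf(). */
static const char *find_debugfs_mountpoint(void)
{
	static char mountpoint[4096];
	struct mntent *m;
	FILE *fp = setmntent("/proc/mounts", "r");

	if (fp == NULL)
		return NULL;		/* no /proc/mounts */

	while ((m = getmntent(fp)) != NULL) {
		if (strcmp(m->mnt_type, "debugfs") == 0) {
			strncpy(mountpoint, m->mnt_dir, sizeof(mountpoint) - 1);
			endmntent(fp);
			return mountpoint;
		}
	}

	endmntent(fp);
	return NULL;			/* debugfs not mounted */
}

int main(void)
{
	const char *path = find_debugfs_mountpoint();

	printf("debugfs: %s\n", path ? path : "not mounted");
	return 0;
}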
Signed-off-by: Ulrich Drepper Acked-by: Pekka Enberg Cc: a.p.zijlstra@chello.nl Cc: acme@redhat.com Cc: eranian@google.com Cc: fweisbec@gmail.com Cc: lizf@cn.fujitsu.com Cc: paulus@samba.org Cc: xiaoguangrong@cn.fujitsu.com LKML-Reference: <200912192140.nBJLeSfA028905@hs20-bc2-1.build.redhat.com> [ v2: two small stylistic fixlets ] Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 17 +++++-------- tools/perf/util/trace-event-info.c | 50 ++++++++++++++++++-------------------- 2 files changed, 29 insertions(+), 38 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 4c06828fe39d..05dc5a735039 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -92,23 +92,18 @@ static void setup_cpunode_map(void) if (!dir1) return; - while (true) { - dent1 = readdir(dir1); - if (!dent1) - break; - - if (sscanf(dent1->d_name, "node%u", &mem) < 1) + while ((dent1 = readdir(dir1)) != NULL) { + if (dent1->d_type != DT_DIR || + sscanf(dent1->d_name, "node%u", &mem) < 1) continue; snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); dir2 = opendir(buf); if (!dir2) continue; - while (true) { - dent2 = readdir(dir2); - if (!dent2) - break; - if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1) + while ((dent2 = readdir(dir2)) != NULL) { + if (dent2->d_type != DT_LNK || + sscanf(dent2->d_name, "cpu%u", &cpu) < 1) continue; cpunode_map[cpu] = mem; } diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index cace35595530..dfef238ce158 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -20,6 +20,7 @@ */ #define _GNU_SOURCE #include +#include #include #include #include @@ -103,28 +104,28 @@ static const char *find_debugfs(void) { static char debugfs[MAX_PATH+1]; static int debugfs_found; - char type[100]; FILE *fp; + struct mntent *m; if (debugfs_found) return debugfs; - if ((fp = fopen("/proc/mounts","r")) == NULL) + fp = setmntent("/proc/mounts", "r"); + if (!fp) die("Can't open /proc/mounts for read"); - while (fscanf(fp, "%*s %" - STR(MAX_PATH) - "s %99s %*s %*d %*d\n", - debugfs, type) == 2) { - if (strcmp(type, "debugfs") == 0) + while ((m = getmntent(fp)) != NULL) { + if (strcmp(m->mnt_type, "debugfs") == 0) { + strcpy(debugfs, m->mnt_dir); + debugfs_found = 1; break; + } } - fclose(fp); - if (strcmp(type, "debugfs") != 0) - die("debugfs not mounted, please mount"); + endmntent(fp); - debugfs_found = 1; + if (!debugfs_found) + die("debugfs not mounted, please mount"); return debugfs; } @@ -317,7 +318,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) die("can't read directory '%s'", sys); while ((dent = readdir(dir))) { - if (strcmp(dent->d_name, ".") == 0 || + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || !name_in_tp_list(dent->d_name, tps)) continue; @@ -334,7 +336,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) rewinddir(dir); while ((dent = readdir(dir))) { - if (strcmp(dent->d_name, ".") == 0 || + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || !name_in_tp_list(dent->d_name, tps)) continue; @@ -394,26 +397,21 @@ static void read_event_files(struct tracepoint_path *tps) die("can't read directory '%s'", path); while ((dent = readdir(dir))) { - if (strcmp(dent->d_name, ".") == 0 || + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, 
"ftrace") == 0 || !system_in_tp_list(dent->d_name, tps)) continue; - sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2); - sprintf(sys, "%s/%s", path, dent->d_name); - ret = stat(sys, &st); - free(sys); - if (ret < 0) - continue; - if (S_ISDIR(st.st_mode)) - count++; + count++; } write_or_die(&count, 4); rewinddir(dir); while ((dent = readdir(dir))) { - if (strcmp(dent->d_name, ".") == 0 || + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, "ftrace") == 0 || !system_in_tp_list(dent->d_name, tps)) @@ -422,10 +420,8 @@ static void read_event_files(struct tracepoint_path *tps) sprintf(sys, "%s/%s", path, dent->d_name); ret = stat(sys, &st); if (ret >= 0) { - if (S_ISDIR(st.st_mode)) { - write_or_die(dent->d_name, strlen(dent->d_name) + 1); - copy_event_system(sys, tps); - } + write_or_die(dent->d_name, strlen(dent->d_name) + 1); + copy_event_system(sys, tps); } free(sys); } -- cgit v1.2.3-58-ga151 From 07b139c8c81b97bbe55c68daf0cbeca8b1c609ca Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 21 Dec 2009 14:27:35 +0800 Subject: perf events: Remove CONFIG_EVENT_PROFILE Quoted from Ingo: | This reminds me - i think we should eliminate CONFIG_EVENT_PROFILE - | it's an unnecessary Kconfig complication. If both PERF_EVENTS and | EVENT_TRACING is enabled we should expose generic tracepoints. | | Nor is it limited to event 'profiling', so it has become a misnomer as | well. Signed-off-by: Li Zefan Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <4B2F1557.2050705@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 2 +- include/linux/perf_event.h | 2 +- include/linux/syscalls.h | 4 ++-- include/trace/ftrace.h | 12 ++++++------ include/trace/syscall.h | 4 ++-- init/Kconfig | 13 ------------- kernel/perf_event.c | 4 ++-- kernel/trace/Makefile | 4 +++- kernel/trace/trace_events_filter.c | 4 ++-- kernel/trace/trace_kprobe.c | 14 +++++++------- kernel/trace/trace_syscalls.c | 5 ++--- 11 files changed, 28 insertions(+), 40 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 2233c98d80df..0a09e758c7d3 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -188,7 +188,7 @@ do { \ __trace_printk(ip, fmt, ##args); \ } while (0) -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS struct perf_event; extern int ftrace_profile_enable(int event_id); extern void ftrace_profile_disable(int event_id); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a494e7501292..9a1d276db754 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -658,7 +658,7 @@ struct perf_event { perf_overflow_handler_t overflow_handler; -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_EVENT_TRACING struct event_filter *filter; #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 65793e90d6f6..b7c7fcf7790b 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -99,7 +99,7 @@ struct perf_event_attr; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) 
__SC_TEST(t6); __SC_TEST5(__VA_ARGS__) -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ .profile_enable = prof_sysenter_enable, \ @@ -113,7 +113,7 @@ struct perf_event_attr; #define TRACE_SYS_ENTER_PROFILE_INIT(sname) #define TRACE_SYS_EXIT_PROFILE(sname) #define TRACE_SYS_EXIT_PROFILE_INIT(sname) -#endif +#endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_SYSCALLS #define __SC_STR_ADECL1(t, a) #a diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 73523151a731..2fdd36df41f6 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -498,7 +498,7 @@ static inline int ftrace_get_offsets_##call( \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS /* * Generate the functions needed for tracepoint perf_event support. @@ -541,7 +541,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#endif +#endif /* CONFIG_PERF_EVENTS */ /* * Stage 4 of the trace events. @@ -626,7 +626,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ * */ -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS #define _TRACE_PROFILE_INIT(call) \ .profile_enable = ftrace_profile_enable_##call, \ @@ -634,7 +634,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ #else #define _TRACE_PROFILE_INIT(call) -#endif +#endif /* CONFIG_PERF_EVENTS */ #undef __entry #define __entry entry @@ -834,7 +834,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * } */ -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS #undef __perf_addr #define __perf_addr(a) __addr = (a) @@ -926,7 +926,7 @@ static void ftrace_profile_##call(proto) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#endif /* CONFIG_EVENT_PROFILE */ +#endif /* CONFIG_PERF_EVENTS */ #undef _TRACE_PROFILE_INIT diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 961fda3556bb..3d463dcef298 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -49,12 +49,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif -#ifdef CONFIG_EVENT_PROFILE + +#ifdef CONFIG_PERF_EVENTS int prof_sysenter_enable(struct ftrace_event_call *call); void prof_sysenter_disable(struct ftrace_event_call *call); int prof_sysexit_enable(struct ftrace_event_call *call); void prof_sysexit_disable(struct ftrace_event_call *call); - #endif #endif /* _TRACE_SYSCALL_H */ diff --git a/init/Kconfig b/init/Kconfig index a23da9f01803..06dab27c18d9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -966,19 +966,6 @@ config PERF_EVENTS Say Y if unsure. -config EVENT_PROFILE - bool "Tracepoint profiling sources" - depends on PERF_EVENTS && EVENT_TRACING - default y - help - Allow the use of tracepoints as software performance events. - - When this is enabled, you can create perf events based on - tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID - found in debugfs://tracing/events/*/*/id. (The -e/--events - option to the perf tool can parse and interpret symbolic - tracepoints, in the subsystem:tracepoint_name format.) 
- config PERF_COUNTERS bool "Kernel performance counters (old config option)" depends on HAVE_PERF_EVENTS diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 099bd662daa6..5b987b4a98a8 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4177,7 +4177,7 @@ static const struct pmu perf_ops_task_clock = { .read = task_clock_perf_event_read, }; -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size) @@ -4282,7 +4282,7 @@ static void perf_event_free_filter(struct perf_event *event) { } -#endif /* CONFIG_EVENT_PROFILE */ +#endif /* CONFIG_EVENT_TRACING */ #ifdef CONFIG_HAVE_HW_BREAKPOINT static void bp_perf_event_destroy(struct perf_event *event) diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index cd9ecd89ec77..d00c6fe23f54 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -51,7 +51,9 @@ endif obj-$(CONFIG_EVENT_TRACING) += trace_events.o obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o -obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o +ifeq ($(CONFIG_PERF_EVENTS),y) +obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o +endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 50504cb228de..74563d7e102e 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1360,7 +1360,7 @@ out_unlock: return err; } -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS void ftrace_profile_free_filter(struct perf_event *event) { @@ -1428,5 +1428,5 @@ out_unlock: return err; } -#endif /* CONFIG_EVENT_PROFILE */ +#endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 375f81a568dc..75d75dec226a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1249,7 +1249,7 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, ", REC->" FIELD_STRING_RETIP); } -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ static __kprobes int kprobe_profile_func(struct kprobe *kp, @@ -1407,7 +1407,7 @@ static void probe_profile_disable(struct ftrace_event_call *call) disable_kprobe(&tp->rp.kp); } } -#endif /* CONFIG_EVENT_PROFILE */ +#endif /* CONFIG_PERF_EVENTS */ static __kprobes @@ -1417,10 +1417,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) if (tp->flags & TP_FLAG_TRACE) kprobe_trace_func(kp, regs); -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) kprobe_profile_func(kp, regs); -#endif /* CONFIG_EVENT_PROFILE */ +#endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1431,10 +1431,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) if (tp->flags & TP_FLAG_TRACE) kretprobe_trace_func(ri, regs); -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) kretprobe_profile_func(ri, regs); -#endif /* CONFIG_EVENT_PROFILE */ +#endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1463,7 +1463,7 @@ static int register_probe_event(struct trace_probe *tp) call->regfunc = probe_event_enable; call->unregfunc = probe_event_disable; -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS call->profile_enable = probe_profile_enable; call->profile_disable = 
probe_profile_disable; #endif diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 75289f372dd2..f694f66d75b0 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -421,7 +421,7 @@ int __init init_ftrace_syscalls(void) } core_initcall(init_ftrace_syscalls); -#ifdef CONFIG_EVENT_PROFILE +#ifdef CONFIG_PERF_EVENTS static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); @@ -626,6 +626,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call) mutex_unlock(&syscall_trace_lock); } -#endif - +#endif /* CONFIG_PERF_EVENTS */ -- cgit v1.2.3-58-ga151 From 29c52aa2300173dd45df04dae1f5acc81a2c93b1 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Mon, 28 Dec 2009 16:47:12 +0800 Subject: perf tools: Mount debugfs automatically Mount debugfs filesystem under '/sys/kernel/debug', if it's not mounted. Signed-off-by: Xiao Guangrong Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Clark Williams Cc: John Kacur LKML-Reference: <4B387090.7080407@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 2 +- tools/perf/util/debugfs.c | 16 +++++++--------- tools/perf/util/debugfs.h | 2 +- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 873e55fab375..fc89005c3e51 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -388,7 +388,7 @@ static int run_argv(int *argcp, const char ***argv) /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ static void get_debugfs_mntpt(void) { - const char *path = debugfs_find_mountpoint(); + const char *path = debugfs_mount(NULL); if (path) strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c index 06b73ee02c49..1f805fde5fd4 100644 --- a/tools/perf/util/debugfs.c +++ b/tools/perf/util/debugfs.c @@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path) return 0; } -/* mount the debugfs somewhere */ +/* mount the debugfs somewhere if it's not mounted */ -int debugfs_mount(const char *mountpoint) +char *debugfs_mount(const char *mountpoint) { - char mountcmd[128]; - /* see if it's already mounted */ if (debugfs_find_mountpoint()) { debugfs_premounted = 1; - return 0; + return debugfs_mountpoint; } /* if not mounted and no argument */ @@ -127,13 +125,13 @@ int debugfs_mount(const char *mountpoint) mountpoint = "/sys/kernel/debug"; } + if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0) + return NULL; + /* save the mountpoint */ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); - /* mount it */ - snprintf(mountcmd, sizeof(mountcmd), - "/bin/mount -t debugfs debugfs %s", mountpoint); - return system(mountcmd); + return debugfs_mountpoint; } /* umount the debugfs */ diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h index 3cd14f9ae784..83a02879745f 100644 --- a/tools/perf/util/debugfs.h +++ b/tools/perf/util/debugfs.h @@ -15,7 +15,7 @@ extern const char *debugfs_find_mountpoint(void); extern int debugfs_valid_mountpoint(const char *debugfs); extern int debugfs_valid_entry(const char *path); -extern int debugfs_mount(const char *mountpoint); +extern char *debugfs_mount(const char *mountpoint); extern int debugfs_umount(void); extern int debugfs_write(const char *entry, const char *value); extern int debugfs_read(const char *entry, char *buffer, size_t size); -- cgit v1.2.3-58-ga151 From 61be3e59ba7a6dbd39f92fd1f107285a0caeb008 Mon Sep 17 
00:00:00 2001 From: Xiao Guangrong Date: Mon, 28 Dec 2009 16:48:30 +0800 Subject: perf trace: Clean up find_debugfs() Remove redundant code for 'perf trace' Signed-off-by: Xiao Guangrong Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Clark Williams Cc: John Kacur LKML-Reference: <4B3870DE.7090500@cn.fujitsu.com> [ v2: resolved conflicts with recent changes ] Signed-off-by: Ingo Molnar --- tools/perf/util/debugfs.c | 1 + tools/perf/util/trace-event-info.c | 29 +++++------------------------ 2 files changed, 6 insertions(+), 24 deletions(-) diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c index 1f805fde5fd4..a88fefc0cc0a 100644 --- a/tools/perf/util/debugfs.c +++ b/tools/perf/util/debugfs.c @@ -130,6 +130,7 @@ char *debugfs_mount(const char *mountpoint) /* save the mountpoint */ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); + debugfs_found = 1; return debugfs_mountpoint; } diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index dfef238ce158..535176dc95b6 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -38,6 +38,7 @@ #include "../perf.h" #include "trace-event.h" +#include "debugfs.h" #define VERSION "0.5" @@ -102,32 +103,12 @@ void *malloc_or_die(unsigned int size) static const char *find_debugfs(void) { - static char debugfs[MAX_PATH+1]; - static int debugfs_found; - FILE *fp; - struct mntent *m; - - if (debugfs_found) - return debugfs; - - fp = setmntent("/proc/mounts", "r"); - if (!fp) - die("Can't open /proc/mounts for read"); - - while ((m = getmntent(fp)) != NULL) { - if (strcmp(m->mnt_type, "debugfs") == 0) { - strcpy(debugfs, m->mnt_dir); - debugfs_found = 1; - break; - } - } - - endmntent(fp); + const char *path = debugfs_mount(NULL); - if (!debugfs_found) - die("debugfs not mounted, please mount"); + if (!path) + die("Your kernel not support debugfs filesystem"); - return debugfs; + return path; } /* -- cgit v1.2.3-58-ga151 From 9967411e5b324a908e344d6ce66b77bd5d372c3e Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Mon, 28 Dec 2009 16:49:38 +0800 Subject: perf trace: Fix forgotten close of file/dir Signed-off-by: Xiao Guangrong Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Clark Williams Cc: John Kacur LKML-Reference: <4B387122.7090801@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-info.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index 535176dc95b6..407fd65b6cdb 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -253,6 +253,8 @@ static void read_header_files(void) write_or_die("header_page", 12); write_or_die(&size, 8); check_size = copy_file_fd(fd); + close(fd); + if (size != check_size) die("wrong size for '%s' size=%lld read=%lld", path, size, check_size); @@ -271,6 +273,7 @@ static void read_header_files(void) if (size != check_size) die("wrong size for '%s'", path); put_tracing_file(path); + close(fd); } static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) @@ -337,6 +340,7 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) free(format); } + closedir(dir); } static void read_ftrace_files(struct tracepoint_path *tps) @@ -407,6 +411,7 @@ static void read_event_files(struct tracepoint_path *tps) free(sys); } + closedir(dir); put_tracing_file(path); } -- cgit v1.2.3-58-ga151 From 41bdcb23dab22bf27361c5f2d89fe895d8904915 Mon Sep 
17 00:00:00 2001 From: Liming Wang Date: Tue, 29 Dec 2009 16:37:07 +0800 Subject: perf tools: Unify event type description Move the event type descriptions into a unified array, keeping the array index consistent with perf_type_id. Signed-off-by: Liming Wang Cc: Frederic Weisbecker Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1262075829-16257-1-git-send-email-liming.wang@windriver.com> Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index e5bc0fb016b2..dc585a835cab 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -835,11 +835,12 @@ int parse_filter(const struct option *opt __used, const char *str, } static const char * const event_type_descriptors[] = { - "", "Hardware event", "Software event", "Tracepoint event", "Hardware cache event", + "Raw hardware event descriptor", + "Hardware breakpoint", }; /* @@ -872,7 +873,7 @@ static void print_tracepoint_events(void) snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); printf(" %-42s [%s]\n", evt_path, - event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); + event_type_descriptors[PERF_TYPE_TRACEPOINT]); } closedir(evt_dir); } @@ -892,9 +893,7 @@ void print_events(void) printf("List of pre-defined events (to be used in -e):\n"); for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { - type = syms->type + 1; - if (type >= ARRAY_SIZE(event_type_descriptors)) - type = 0; + type = syms->type; if (type != prev_type) printf("\n"); @@ -919,17 +918,19 @@ void print_events(void) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { printf(" %-42s [%s]\n", event_cache_name(type, op, i), - event_type_descriptors[4]); + event_type_descriptors[PERF_TYPE_HW_CACHE]); } } } printf("\n"); - printf(" %-42s [raw hardware event descriptor]\n", - "rNNN"); + printf(" %-42s [%s]\n", + "rNNN", event_type_descriptors[PERF_TYPE_RAW]); printf("\n"); - printf(" %-42s [hardware breakpoint]\n", "mem:[:access]"); + printf(" %-42s [%s]\n", + "mem:[:access]", + event_type_descriptors[PERF_TYPE_BREAKPOINT]); printf("\n"); print_tracepoint_events(); -- cgit v1.2.3-58-ga151 From 63bbd5e2d539c9290b229c832f62d42aac23db94 Mon Sep 17 00:00:00 2001 From: Liming Wang Date: Tue, 29 Dec 2009 16:37:09 +0800 Subject: perf probe: Change CONFIG_KPROBE_TRACER to CONFIG_KPROBE_EVENT Make the config name consistent. Signed-off-by: Liming Wang Acked-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1262075829-16257-3-git-send-email-liming.wang@windriver.com> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 29465d440043..8e532d9824f0 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -368,7 +368,7 @@ static int open_kprobe_events(int flags, int mode) if (ret < 0) { if (errno == ENOENT) die("kprobe_events file does not exist -" - " please rebuild with CONFIG_KPROBE_TRACER."); + " please rebuild with CONFIG_KPROBE_EVENT."); else die("Could not open kprobe_events file: %s", strerror(errno)); -- cgit v1.2.3-58-ga151 From 769885f372300a7fcfb9e54e4e2990718d40b529 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Dec 2009 22:48:32 -0200 Subject: perf header: Do_read shouldn't die MIME-Version: 1.0 Content-Type: text/plain;
charset=UTF-8 Content-Transfer-Encoding: 8bit Propagate the errors instead, its callers already propagate other errors. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262047716-23171-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index df237c3a041b..6b3cb94e8a2b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -432,19 +432,19 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) return 0; } -static void do_read(int fd, void *buf, size_t size) +static int do_read(int fd, void *buf, size_t size) { while (size) { int ret = read(fd, buf, size); - if (ret < 0) - die("failed to read"); - if (ret == 0) - die("failed to read: missing data"); + if (ret <= 0) + return -1; size -= ret; buf += ret; } + + return 0; } int perf_header__process_sections(struct perf_header *self, int fd, @@ -455,7 +455,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, int nr_sections; int sec_size; int idx = 0; - int err = 0, feat = 1; + int err = -1, feat = 1; nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); if (!nr_sections) @@ -469,8 +469,10 @@ int perf_header__process_sections(struct perf_header *self, int fd, lseek(fd, self->data_offset + self->data_size, SEEK_SET); - do_read(fd, feat_sec, sec_size); + if (do_read(fd, feat_sec, sec_size)) + goto out_free; + err = 0; while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { if (perf_header__has_feat(self, feat)) { struct perf_file_section *sec = &feat_sec[idx++]; @@ -481,18 +483,18 @@ int perf_header__process_sections(struct perf_header *self, int fd, } ++feat; } - +out_free: free(feat_sec); return err; -}; +} int perf_file_header__read(struct perf_file_header *self, struct perf_header *ph, int fd) { lseek(fd, 0, SEEK_SET); - do_read(fd, self, sizeof(*self)); - if (self->magic != PERF_MAGIC || + if (do_read(fd, self, sizeof(*self)) || + self->magic != PERF_MAGIC || self->attr_size != sizeof(struct perf_file_attr)) return -1; @@ -558,7 +560,8 @@ int perf_header__read(struct perf_header *self, int fd) struct perf_header_attr *attr; off_t tmp; - do_read(fd, &f_attr, sizeof(f_attr)); + if (do_read(fd, &f_attr, sizeof(f_attr))) + goto out_errno; tmp = lseek(fd, 0, SEEK_CUR); attr = perf_header_attr__new(&f_attr.attr); @@ -569,7 +572,8 @@ int perf_header__read(struct perf_header *self, int fd) lseek(fd, f_attr.ids.offset, SEEK_SET); for (j = 0; j < nr_ids; j++) { - do_read(fd, &f_id, sizeof(f_id)); + if (do_read(fd, &f_id, sizeof(f_id))) + goto out_errno; if (perf_header_attr__add_id(attr, f_id) < 0) { perf_header_attr__delete(attr); @@ -589,7 +593,8 @@ int perf_header__read(struct perf_header *self, int fd) events = malloc(f_header.event_types.size); if (events == NULL) return -ENOMEM; - do_read(fd, events, f_header.event_types.size); + if (do_read(fd, events, f_header.event_types.size)) + goto out_errno; event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } @@ -599,6 +604,8 @@ int perf_header__read(struct perf_header *self, int fd) self->frozen = 1; return 0; +out_errno: + return -errno; } u64 perf_header__sample_type(struct perf_header *header) -- cgit v1.2.3-58-ga151 From ae99fb2c335ef018520950ddc9692faacab39cf2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo 
Date: Mon, 28 Dec 2009 22:48:33 -0200 Subject: perf header: perf_header__push_event() shouldn't die MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just propagate eventual errors. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262047716-23171-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 16 ++++++++++------ tools/perf/util/header.h | 2 +- tools/perf/util/parse-events.c | 18 +++++++++++------- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 6b3cb94e8a2b..709e3252f049 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -105,24 +105,28 @@ struct perf_trace_event_type { static int event_count; static struct perf_trace_event_type *events; -void perf_header__push_event(u64 id, const char *name) +int perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) pr_warning("Event %s will be truncated\n", name); if (!events) { events = malloc(sizeof(struct perf_trace_event_type)); - if (!events) - die("nomem"); + if (events == NULL) + return -ENOMEM; } else { - events = realloc(events, (event_count + 1) * sizeof(struct perf_trace_event_type)); - if (!events) - die("nomem"); + struct perf_trace_event_type *nevents; + + nevents = realloc(events, (event_count + 1) * sizeof(*events)); + if (nevents == NULL) + return -ENOMEM; + events = nevents; } memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); events[event_count].event_id = id; strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); event_count++; + return 0; } char *perf_header__find_event(u64 id) diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index d118d05d3abe..2b69aab67e35 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -64,7 +64,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit); int perf_header__add_attr(struct perf_header *self, struct perf_header_attr *attr); -void perf_header__push_event(u64 id, const char *name); +int perf_header__push_event(u64 id, const char *name); char *perf_header__find_event(u64 id); struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index dc585a835cab..609d5a9470c5 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -753,11 +753,11 @@ modifier: return ret; } -static void store_event_type(const char *orgname) +static int store_event_type(const char *orgname) { char filename[PATH_MAX], *c; FILE *file; - int id; + int id, n; sprintf(filename, "%s/", debugfs_path); strncat(filename, orgname, strlen(orgname)); @@ -769,11 +769,14 @@ static void store_event_type(const char *orgname) file = fopen(filename, "r"); if (!file) - return; - if (fscanf(file, "%i", &id) < 1) - die("cannot store event ID"); + return 0; + n = fscanf(file, "%i", &id); fclose(file); - perf_header__push_event(id, orgname); + if (n < 1) { + pr_err("cannot store event ID\n"); + return -EINVAL; + } + return perf_header__push_event(id, orgname); } int parse_events(const struct option *opt __used, const char *str, int unset __used) @@ -782,7 +785,8 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u enum event_result ret; if (strchr(str, ':')) - store_event_type(str); + if (store_event_type(str) < 0) + 
return -1; for (;;) { if (nr_counters == MAX_COUNTERS) -- cgit v1.2.3-58-ga151 From 71289be7630fb97f2de6bb2e18a50289dc869f9d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Dec 2009 22:48:34 -0200 Subject: perf report: Add --hide-unresolved/-U command line option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Useful to match the 'overhead' column in 'perf report' with the 'baseline' one in 'perf diff'. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262047716-23171-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 508934b0140a..4292d7afcd60 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -34,6 +34,7 @@ static char const *input_name = "perf.data"; static int force; +static bool hide_unresolved; static int show_threads; static struct perf_read_values show_threads_values; @@ -121,7 +122,7 @@ static int process_sample_event(event_t *event, struct perf_session *session) return -1; } - if (al.filtered) + if (al.filtered || (hide_unresolved && al.sym == NULL)) return 0; if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { @@ -342,6 +343,8 @@ static const struct option options[] = { OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' is reserved."), + OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved, + "Only display entries resolved to a symbol"), OPT_END() }; -- cgit v1.2.3-58-ga151 From cdbae31408cf39372402076cf2e189ec693daa71 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Dec 2009 22:48:35 -0200 Subject: perf diff: Don't add the period for unresolved symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we don't add histograms buckets for them, this way the sum of baselines should be 100%. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262047716-23171-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-diff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 1cbecaf029fa..876a4b981be8 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -51,12 +51,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi return -1; } - if (al.filtered) + if (al.filtered || al.sym == NULL) return 0; event__parse_sample(event, session->sample_type, &data); - if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) { + if (perf_session__add_hist_entry(session, &al, data.period)) { pr_warning("problem incrementing symbol count, skipping event\n"); return -1; } -- cgit v1.2.3-58-ga151 From 9c443dfdd31eddea6cbe6ee0ca469fbcc4e1dc3b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Dec 2009 22:48:36 -0200 Subject: perf diff: Fix support for all --sort combinations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we finish creating the hist_entries we _already_ have them sorted "by name", in fact by whatever is in --sort, which is exactly how we find the pairs in perf_session__match_hists, as 'comm', 'dso' & 'symbol' are all strings we need to match against the baseline session. So only do the sort by hits, followed by a resort by --sort, if we need to find the positions for showing the --displacement of hist entries. Now all these modes work correctly. The example is a simple 'perf record -f find / > /dev/null' run twice, followed by the following commands: $ perf diff -f --sort comm # Baseline Delta Command # ........ .......... ....... # 0.00% +100.00% find $ perf diff -f --sort dso # Baseline Delta Shared Object # ........ .......... .................. # 59.97% -0.44% [kernel] 21.17% +0.28% libc-2.5.so 18.49% +0.16% [ext3] 0.37% find $ perf diff -f --sort symbol | head -8 # Baseline Delta Symbol # ........ .......... ...... # 6.21% +0.36% [k] ext3fs_dirhash 3.43% +0.41% [.] __GI_strlen 3.53% +0.16% [k] __kmalloc 3.17% +0.49% [k] system_call 3.06% +0.37% [k] ext3_htree_store_dirent $ perf diff -f --sort dso,symbol | head -8 # Baseline Delta Shared Object Symbol # ........ .......... .................. ...... # 6.21% +0.36% [ext3] [k] ext3fs_dirhash 3.43% +0.41% libc-2.5.so [.] __GI_strlen 3.53% +0.16% [kernel] [k] __kmalloc 3.17% +0.49% [kernel] [k] system_call 3.06% +0.37% [ext3] [k] ext3_htree_store_dirent $ And we don't have to do two expensive resorts in the common, non --displacement case.
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262047716-23171-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-diff.c | 52 +++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 876a4b981be8..924bfb77a6ab 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -82,29 +82,19 @@ static void perf_session__insert_hist_entry_by_name(struct rb_root *root, struct hist_entry *iter; while (*p != NULL) { - int cmp; parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = strcmp(he->map->dso->name, iter->map->dso->name); - if (cmp > 0) + if (hist_entry__cmp(he, iter) < 0) p = &(*p)->rb_left; - else if (cmp < 0) + else p = &(*p)->rb_right; - else { - cmp = strcmp(he->sym->name, iter->sym->name); - if (cmp > 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, root); } -static void perf_session__resort_by_name(struct perf_session *self) +static void perf_session__resort_hist_entries(struct perf_session *self) { unsigned long position = 1; struct rb_root tmp = RB_ROOT; @@ -122,29 +112,28 @@ static void perf_session__resort_by_name(struct perf_session *self) self->hists = tmp; } +static void perf_session__set_hist_entries_positions(struct perf_session *self) +{ + perf_session__output_resort(self, self->events_stats.total); + perf_session__resort_hist_entries(self); +} + static struct hist_entry * -perf_session__find_hist_entry_by_name(struct perf_session *self, - struct hist_entry *he) +perf_session__find_hist_entry(struct perf_session *self, + struct hist_entry *he) { struct rb_node *n = self->hists.rb_node; while (n) { struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); - int cmp = strcmp(he->map->dso->name, iter->map->dso->name); + int64_t cmp = hist_entry__cmp(he, iter); - if (cmp > 0) + if (cmp < 0) n = n->rb_left; - else if (cmp < 0) + else if (cmp > 0) n = n->rb_right; - else { - cmp = strcmp(he->sym->name, iter->sym->name); - if (cmp > 0) - n = n->rb_left; - else if (cmp < 0) - n = n->rb_right; - else - return iter; - } + else + return iter; } return NULL; @@ -155,11 +144,9 @@ static void perf_session__match_hists(struct perf_session *old_session, { struct rb_node *nd; - perf_session__resort_by_name(old_session); - for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) { struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); - pos->pair = perf_session__find_hist_entry_by_name(old_session, pos); + pos->pair = perf_session__find_hist_entry(old_session, pos); } } @@ -177,9 +164,12 @@ static int __cmd_diff(void) ret = perf_session__process_events(session[i], &event_ops); if (ret) goto out_delete; - perf_session__output_resort(session[i], session[i]->events_stats.total); } + perf_session__output_resort(session[1], session[1]->events_stats.total); + if (show_displacement) + perf_session__set_hist_entries_positions(session[0]); + perf_session__match_hists(session[0], session[1]); perf_session__fprintf_hists(session[1], session[0], show_displacement, stdout); -- cgit v1.2.3-58-ga151 From 0fb8ee48d9dfff6a0913ceb0be2068d8be203763 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 31 Dec 2009 05:53:03 +0100 Subject: perf: Drop useless check for ignored frame The check that ignores the debug and nmi stack 
frames is useless now that we have a frame pointer that makes us start at the right place. We no longer have to deal with these. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras LKML-Reference: <1262235183-5320-2-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/stacktrace.h | 2 -- arch/x86/kernel/cpu/perf_event.c | 8 -------- arch/x86/kernel/dumpstack_32.c | 5 ----- arch/x86/kernel/dumpstack_64.c | 5 ----- 4 files changed, 20 deletions(-) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 35e89122a42f..4dab78edbad9 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -3,8 +3,6 @@ extern int kstack_depth_to_print; -int x86_is_stack_id(int id, char *name); - struct thread_info; struct stacktrace_ops; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index d616c06e99b4..b1bb8c550526 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2297,7 +2297,6 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); -static DEFINE_PER_CPU(int, in_ignored_frame); static void @@ -2313,10 +2312,6 @@ static void backtrace_warning(void *data, char *msg) static int backtrace_stack(void *data, char *name) { - per_cpu(in_ignored_frame, smp_processor_id()) = - x86_is_stack_id(NMI_STACK, name) || - x86_is_stack_id(DEBUG_STACK, name); - return 0; } @@ -2324,9 +2319,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) { struct perf_callchain_entry *entry = data; - if (per_cpu(in_ignored_frame, smp_processor_id())) - return; - if (reliable) callchain_store(entry, addr); } diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index ae775ca47b25..11540a189d93 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -18,11 +18,6 @@ #include "dumpstack.h" -/* Just a stub for now */ -int x86_is_stack_id(int id, char *name) -{ - return 0; -} void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 0ad9597073f5..676bc051252e 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -33,11 +33,6 @@ static char x86_stack_ids[][8] = { #endif }; -int x86_is_stack_id(int id, char *name) -{ - return x86_stack_ids[id - 1] == name; -} - static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, unsigned *usedp, char **idp) { -- cgit v1.2.3-58-ga151 From 60666c630bdb33983a894b050b588b663f38f368 Mon Sep 17 00:00:00 2001 From: Liming Wang Date: Thu, 31 Dec 2009 16:05:50 +0800 Subject: perf tools: Fix --pid option for stat The current --pid option doesn't work for perf stat. Change it to behave the way 'perf record --pid' does.
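(Usage illustration only; the PID below is a placeholder, not output from the patch:

  $ perf stat -p 1234

After this change the above is expected to attach to the already running task and print its counter stats when interrupted with Ctrl-C, instead of requiring a command to be forked, mirroring how 'perf record --pid' behaves.)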
Signed-off-by: Liming Wang Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1262246750-2191-1-git-send-email-liming.wang@windriver.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 106 ++++++++++++++++++++++++++-------------------- 1 file changed, 61 insertions(+), 45 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c70d72003557..e8c85d5aec41 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -44,6 +44,7 @@ #include "util/parse-events.h" #include "util/event.h" #include "util/debug.h" +#include "util/header.h" #include #include @@ -79,6 +80,8 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int event_scaled[MAX_COUNTERS]; +static volatile int done = 0; + struct stats { double n, mean, M2; @@ -247,61 +250,64 @@ static int run_perf_stat(int argc __used, const char **argv) unsigned long long t0, t1; int status = 0; int counter; - int pid; + int pid = target_pid; int child_ready_pipe[2], go_pipe[2]; + const bool forks = (target_pid == -1 && argc > 0); char buf; if (!system_wide) nr_cpus = 1; - if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) { + if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { perror("failed to create pipes"); exit(1); } - if ((pid = fork()) < 0) - perror("failed to fork"); + if (forks) { + if ((pid = fork()) < 0) + perror("failed to fork"); + + if (!pid) { + close(child_ready_pipe[0]); + close(go_pipe[1]); + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + + /* + * Do a dummy execvp to get the PLT entry resolved, + * so we avoid the resolver overhead on the real + * execvp call. + */ + execvp("", (char **)argv); + + /* + * Tell the parent we're ready to go + */ + close(child_ready_pipe[1]); + + /* + * Wait until the parent tells us to go. + */ + if (read(go_pipe[0], &buf, 1) == -1) + perror("unable to read pipe"); + + execvp(argv[0], (char **)argv); + + perror(argv[0]); + exit(-1); + } - if (!pid) { - close(child_ready_pipe[0]); - close(go_pipe[1]); - fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + child_pid = pid; /* - * Do a dummy execvp to get the PLT entry resolved, - * so we avoid the resolver overhead on the real - * execvp call. - */ - execvp("", (char **)argv); - - /* - * Tell the parent we're ready to go + * Wait for the child to be ready to exec. */ close(child_ready_pipe[1]); - - /* - * Wait until the parent tells us to go. - */ - if (read(go_pipe[0], &buf, 1) == -1) + close(go_pipe[0]); + if (read(child_ready_pipe[0], &buf, 1) == -1) perror("unable to read pipe"); - - execvp(argv[0], (char **)argv); - - perror(argv[0]); - exit(-1); + close(child_ready_pipe[0]); } - child_pid = pid; - - /* - * Wait for the child to be ready to exec. 
- */ close(child_ready_pipe[1]); close(go_pipe[0]); if (read(child_ready_pipe[0], &buf, 1) == -1) perror("unable to read pipe"); close(child_ready_pipe[0]); - for (counter = 0; counter < nr_counters; counter++) create_perf_stat_counter(counter, pid); @@ -310,8 +316,12 @@ static int run_perf_stat(int argc __used, const char **argv) */ t0 = rdclock(); - close(go_pipe[1]); - wait(&status); + if (forks) { + close(go_pipe[1]); + wait(&status); + } else { + while(!done); + } t1 = rdclock(); @@ -417,10 +427,13 @@ static void print_stat(int argc, const char **argv) fflush(stdout); fprintf(stderr, "\n"); - fprintf(stderr, " Performance counter stats for \'%s", argv[0]); - - for (i = 1; i < argc; i++) - fprintf(stderr, " %s", argv[i]); + fprintf(stderr, " Performance counter stats for "); + if(target_pid == -1) { + fprintf(stderr, "\'%s", argv[0]); + for (i = 1; i < argc; i++) + fprintf(stderr, " %s", argv[i]); + }else + fprintf(stderr, "task pid \'%d", target_pid); fprintf(stderr, "\'"); if (run_count > 1) @@ -445,6 +458,9 @@ static volatile int signr = -1; static void skip_signal(int signo) { + if(target_pid != -1) + done = 1; + signr = signo; } @@ -461,7 +477,7 @@ static void sig_atexit(void) } static const char * const stat_usage[] = { - "perf stat [] ", + "perf stat [] []", NULL }; @@ -492,7 +508,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) argc = parse_options(argc, argv, options, stat_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc) + if (!argc && target_pid == -1) usage_with_options(stat_usage, options); if (run_count <= 0) usage_with_options(stat_usage, options); -- cgit v1.2.3-58-ga151 From 682b335a5bccf9e5b7e74380784aa2f145d04444 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 4 Jan 2010 16:19:26 -0200 Subject: perf symbols: Generalise the kallsyms parsing routine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will be used to find a specific symbol by name in 'perf record', to support relocation reference symbols for relocatable kernels. Still have to convert the perf trace tools to use it instead of their current reimplementation. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262629169-22797-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 74 +++++++++++++++++++++++++++++++----------------- tools/perf/util/symbol.h | 2 ++ 2 files changed, 50 insertions(+), 26 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 79ca6a099f96..b9e0da57d84b 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -383,16 +383,12 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) return ret; } -/* - * Loads the function entries in /proc/kallsyms into kernel_map->dso, - * so that we can in the next step set the symbol ->end address and then - * call kernel_maps__split_kallsyms.
- */ -static int dso__load_all_kallsyms(struct dso *self, struct map *map) +int kallsyms__parse(void *arg, int (*process_symbol)(void *arg, const char *name, + char type, u64 start)) { char *line = NULL; size_t n; - struct rb_root *root = &self->symbols[map->type]; + int err = 0; FILE *file = fopen("/proc/kallsyms", "r"); if (file == NULL) @@ -400,7 +396,6 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map) while (!feof(file)) { u64 start; - struct symbol *sym; int line_len, len; char symbol_type; char *symbol_name; @@ -421,35 +416,62 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map) continue; symbol_type = toupper(line[len]); - if (!symbol_type__is_a(symbol_type, map->type)) - continue; - symbol_name = line + len + 2; - /* - * Will fix up the end later, when we have all symbols sorted. - */ - sym = symbol__new(start, 0, symbol_name); - if (sym == NULL) - goto out_delete_line; - /* - * We will pass the symbols to the filter later, in - * map__split_kallsyms, when we have split the maps per module - */ - symbols__insert(root, sym); + err = process_symbol(arg, symbol_name, symbol_type, start); + if (err) + break; } free(line); fclose(file); + return err; - return 0; - -out_delete_line: - free(line); out_failure: return -1; } +struct process_kallsyms_args { + struct map *map; + struct dso *dso; +}; + +static int map__process_kallsym_symbol(void *arg, const char *name, + char type, u64 start) +{ + struct symbol *sym; + struct process_kallsyms_args *a = arg; + struct rb_root *root = &a->dso->symbols[a->map->type]; + + if (!symbol_type__is_a(type, a->map->type)) + return 0; + + /* + * Will fix up the end later, when we have all symbols sorted. + */ + sym = symbol__new(start, 0, name); + + if (sym == NULL) + return -ENOMEM; + /* + * We will pass the symbols to the filter later, in + * map__split_kallsyms, when we have split the maps per module + */ + symbols__insert(root, sym); + return 0; +} + +/* + * Loads the function entries in /proc/kallsyms into kernel_map->dso, + * so that we can in the next step set the symbol ->end address and then + * call kernel_maps__split_kallsyms. + */ +static int dso__load_all_kallsyms(struct dso *self, struct map *map) +{ + struct process_kallsyms_args args = { .map = map, .dso = self, }; + return kallsyms__parse(&args, map__process_kallsym_symbol); +} + /* * Split the symbols into maps, making sure there are no overlaps, i.e. the * kernel range is broken in several maps, named [kernel].N, as we don't have diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index f27e158943e9..21313e87c37b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -135,6 +135,8 @@ int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); +int kallsyms__parse(void *arg, int (*process_symbol)(void *arg, const char *name, + char type, u64 start)); int symbol__init(void); int perf_session__create_kernel_maps(struct perf_session *self); -- cgit v1.2.3-58-ga151 From 36a3e6461a0dac8e84b8c94877365324010c151b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 4 Jan 2010 16:19:27 -0200 Subject: perf symbols: Export symbol_type__is_a MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will be needed by the new HEADER_DSO_INFO feature that will be a HEADER_BUILD_ID superset, replacing it. 
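As an aside, a minimal consumer of the new kallsyms__parse()/symbol_type__is_a() pair could look roughly like the sketch below (hypothetical code added here for illustration, not part of this series): the callback is handed every symbol read from /proc/kallsyms and uses symbol_type__is_a() to keep only the entries relevant to a given map type.

/*
 * Hypothetical example: count the function symbols in /proc/kallsyms.
 * Assumes the declarations from tools/perf/util/symbol.h shown above.
 */
struct count_args {
	int nr_funcs;
};

static int count_function_symbol(void *arg, const char *name __used,
				 char type, u64 start __used)
{
	struct count_args *args = arg;

	if (symbol_type__is_a(type, MAP__FUNCTION))
		args->nr_funcs++;

	return 0;	/* a non-zero return would stop kallsyms__parse() */
}

static int count_kernel_functions(void)
{
	struct count_args args = { .nr_funcs = 0, };

	if (kallsyms__parse(&args, count_function_symbol) < 0)
		return -1;

	return args.nr_funcs;
}

The same callback pattern is what 'perf record' uses later in this series to look up the "_text" reference relocation symbol.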
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262629169-22797-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 2 +- tools/perf/util/symbol.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b9e0da57d84b..5dffcd132d15 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -64,7 +64,7 @@ static void dso__set_sorted_by_name(struct dso *self, enum map_type type) self->sorted_by_name |= (1 << type); } -static bool symbol_type__is_a(char symbol_type, enum map_type map_type) +bool symbol_type__is_a(char symbol_type, enum map_type map_type) { switch (map_type) { case MAP__FUNCTION: diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 21313e87c37b..b2b5330a82a0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -139,6 +139,8 @@ int kallsyms__parse(void *arg, int (*process_symbol)(void *arg, const char *name char type, u64 start)); int symbol__init(void); +bool symbol_type__is_a(char symbol_type, enum map_type map_type); + int perf_session__create_kernel_maps(struct perf_session *self); extern struct list_head dsos__user, dsos__kernel; -- cgit v1.2.3-58-ga151 From f92cb24c78a7c853435e46a20d1bd5c894378132 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 4 Jan 2010 16:19:28 -0200 Subject: perf tools: Create write_padded routine out of __dsos__write_buildid_table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will be used by other options where padding is needed. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262629169-22797-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 709e3252f049..942f7da8bf84 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -173,6 +173,20 @@ static int do_write(int fd, const void *buf, size_t size) return 0; } +#define NAME_ALIGN 64 + +static int write_padded(int fd, const void *bf, size_t count, + size_t count_aligned) +{ + static const char zero_buf[NAME_ALIGN]; + int err = do_write(fd, bf, count); + + if (!err) + err = do_write(fd, zero_buf, count_aligned - count); + + return err; +} + #define dsos__for_each_with_build_id(pos, head) \ list_for_each_entry(pos, head, node) \ if (!pos->has_build_id) \ @@ -181,9 +195,7 @@ static int do_write(int fd, const void *buf, size_t size) static int __dsos__write_buildid_table(struct list_head *head, int fd) { -#define NAME_ALIGN 64 struct dso *pos; - static const char zero_buf[NAME_ALIGN]; dsos__for_each_with_build_id(pos, head) { int err; @@ -197,10 +209,8 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd) err = do_write(fd, &b, sizeof(b)); if (err < 0) return err; - err = do_write(fd, pos->long_name, pos->long_name_len + 1); - if (err < 0) - return err; - err = do_write(fd, zero_buf, len - pos->long_name_len - 1); + err = write_padded(fd, pos->long_name, + pos->long_name_len + 1, len); if (err < 0) return err; } -- cgit v1.2.3-58-ga151 From de1764892a61a3ed212973cc028c80dd083179dd Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 4 Jan 2010 16:19:29 -0200 Subject: perf 
session: Keep pointers to the vmlinux maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that tools such as 'perf probe' don't have to lookup '[kernel.kallsyms]' but instead access them directly after perf_session__create_kernel_maps or map_groups__create_kernel_maps. Signed-off-by: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262629169-22797-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 4 +--- tools/perf/util/session.h | 1 + tools/perf/util/symbol.c | 29 +++++++++++++---------------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index c1e6774fd3ed..ffdd3fe87b4a 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -235,9 +235,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) session.psession = perf_session__new(NULL, O_WRONLY, false); if (session.psession == NULL) die("Failed to init perf_session."); - session.kmap = map_groups__find_by_name(&session.psession->kmaps, - MAP__FUNCTION, - "[kernel.kallsyms]"); + session.kmap = session.psession->vmlinux_maps[MAP__FUNCTION]; if (!session.kmap) die("Could not find kernel map.\n"); diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 77c5ee2993c2..8db37bbf0e62 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -18,6 +18,7 @@ struct perf_session { struct map_groups kmaps; struct rb_root threads; struct thread *last_match; + struct map *vmlinux_maps[MAP__NR_TYPES]; struct events_stats events_stats; unsigned long event_total[PERF_RECORD_MAX]; unsigned long unknown_events; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 5dffcd132d15..e290429e9c00 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1662,7 +1662,7 @@ size_t dsos__fprintf_buildid(FILE *fp) __dsos__fprintf_buildid(&dsos__user, fp)); } -static struct dso *dsos__create_kernel( const char *vmlinux) +static struct dso *dsos__create_kernel(const char *vmlinux) { struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]"); @@ -1691,29 +1691,26 @@ out_delete_kernel_dso: return NULL; } -static int map_groups__create_kernel_maps(struct map_groups *self, const char *vmlinux) +static int map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES], + const char *vmlinux) { - struct map *functions, *variables; struct dso *kernel = dsos__create_kernel(vmlinux); + enum map_type type; if (kernel == NULL) return -1; - functions = map__new2(0, kernel, MAP__FUNCTION); - if (functions == NULL) - return -1; + for (type = 0; type < MAP__NR_TYPES; ++type) { + vmlinux_maps[type] = map__new2(0, kernel, type); + if (vmlinux_maps[type] == NULL) + return -1; - variables = map__new2(0, kernel, MAP__VARIABLE); - if (variables == NULL) { - map__delete(functions); - return -1; + vmlinux_maps[type]->map_ip = + vmlinux_maps[type]->unmap_ip = identity__map_ip; + map_groups__insert(self, vmlinux_maps[type]); } - functions->map_ip = functions->unmap_ip = - variables->map_ip = variables->unmap_ip = identity__map_ip; - map_groups__insert(self, functions); - map_groups__insert(self, variables); - return 0; } @@ -1824,7 +1821,7 @@ out_free_comm_list: int perf_session__create_kernel_maps(struct perf_session *self) { - if (map_groups__create_kernel_maps(&self->kmaps, + if 
(map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps, symbol_conf.vmlinux_name) < 0) return -1; -- cgit v1.2.3-58-ga151 From b9a63b9b56d6910a25e3d4905525aef150420a9b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 5 Jan 2010 11:54:45 -0200 Subject: perf report: Fix --no-call-chain option handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid the funny: [root@doppio ~]# perf record -a -f sleep 2s [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.334 MB perf.data (~14572 samples) ] [root@doppio ~]# perf report --no-call-graph selected -g but no callchain data. Did you call perf record without -g? And fix the bug reported by peterz when we do indeed record with callchains and then ask for a report without: [root@doppio ~]# perf record -a -g -f sleep 2s [root@doppio ~]# perf report --no-call-graph Segmentation fault [root@doppio ~]# Reported-by: Peter Zijlstra Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262699685-27820-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 4292d7afcd60..80d691a4191f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -35,6 +35,7 @@ static char const *input_name = "perf.data"; static int force; static bool hide_unresolved; +static bool dont_use_callchains; static int show_threads; static struct perf_read_values show_threads_values; @@ -172,7 +173,8 @@ static int perf_session__setup_sample_type(struct perf_session *self) " -g?\n"); return -1; } - } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { + } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && + !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain" @@ -246,11 +248,19 @@ out_delete: static int parse_callchain_opt(const struct option *opt __used, const char *arg, - int unset __used) + int unset) { char *tok; char *endptr; + /* + * --no-call-graph + */ + if (unset) { + dont_use_callchains = true; + return 0; + } + symbol_conf.use_callchain = true; if (!arg) -- cgit v1.2.3-58-ga151 From 56b03f3c4d641dbdbce2e52a2969712e85b0e030 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 5 Jan 2010 16:50:31 -0200 Subject: perf tools: Handle relocatable kernels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DSOs don't have this problem because the kernel emits a PERF_MMAP for each new executable mapping it performs on monitored threads. To fix the kernel case we simulate the same behaviour, by having 'perf record' to synthesize a PERF_MMAP for the kernel, encoded like this: [root@doppio ~]# perf record -a -f sleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.344 MB perf.data (~15038 samples) ] [root@doppio ~]# perf report -D | head -10 0xd0 [0x40]: event: 1 . . ... raw event: size 64 bytes . 0000: 01 00 00 00 00 00 40 00 00 00 00 00 00 00 00 00 ......@........ . 0010: 00 00 00 81 ff ff ff ff 00 00 00 00 00 00 00 00 ............... . 0020: 00 00 00 00 00 00 00 00 5b 6b 65 72 6e 65 6c 2e ........ [kernel . 
0030: 6b 61 6c 6c 73 79 6d 73 2e 5f 74 65 78 74 5d 00 kallsyms._text] . 0xd0 [0x40]: PERF_RECORD_MMAP 0/0: [0xffffffff81000000((nil)) @ (nil)]: [kernel.kallsyms._text] I.e. we identify such event as having: .pid = 0 .filename = [kernel.kallsyms.REFNAME] .start = REFNAME addr in /proc/kallsyms at 'perf record' time and use now a hardcoded value of '.text' for REFNAME. Then, later, in 'perf report', if there are any kernel hits and thus we need to resolve kernel symbols, we search for REFNAME and if its address changed, relocation happened and we thus must change the kernel mapping routines to one that uses .pgoff as the relocation to apply. This way we use the same mechanism used for the other DSOs and don't have to do a two pass in all the kernel symbols. Reported-by: Xiao Guangrong Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Xiao Guangrong LKML-Reference: <1262717431-1246-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 7 +++++ tools/perf/util/event.c | 64 ++++++++++++++++++++++++++++++++++++++++++--- tools/perf/util/event.h | 4 +++ tools/perf/util/session.c | 46 ++++++++++++++++++++++++++++++++ tools/perf/util/session.h | 10 +++++++ tools/perf/util/symbol.c | 7 +++-- 6 files changed, 133 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 265425322734..8f88420e066b 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -551,6 +551,13 @@ static int __cmd_record(int argc, const char **argv) return err; } + err = event__synthesize_kernel_mmap(process_synthesized_event, + session, "_text"); + if (err < 0) { + pr_err("Couldn't record kernel reference relocation symbol.\n"); + return err; + } + if (!system_wide && profile_cpu == -1) event__synthesize_thread(pid, process_synthesized_event, session); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index bb0fd6da2d56..1a31feb9999f 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -189,6 +189,50 @@ void event__synthesize_threads(int (*process)(event_t *event, closedir(proc); } +struct process_symbol_args { + const char *name; + u64 start; +}; + +static int find_symbol_cb(void *arg, const char *name, char type, u64 start) +{ + struct process_symbol_args *args = arg; + + if (!symbol_type__is_a(type, MAP__FUNCTION) || strcmp(name, args->name)) + return 0; + + args->start = start; + return 1; +} + +int event__synthesize_kernel_mmap(int (*process)(event_t *event, + struct perf_session *session), + struct perf_session *session, + const char *symbol_name) +{ + size_t size; + event_t ev = { + .header = { .type = PERF_RECORD_MMAP }, + }; + /* + * We should get this from /sys/kernel/sections/.text, but till that is + * available use this, and after it is use this as a fallback for older + * kernels. 
+ */ + struct process_symbol_args args = { .name = symbol_name, }; + + if (kallsyms__parse(&args, find_symbol_cb) <= 0) + return -ENOENT; + + size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), + "[kernel.kallsyms.%s]", symbol_name) + 1; + size = ALIGN(size, sizeof(u64)); + ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); + ev.mmap.start = args.start; + + return process(&ev, session); +} + static void thread__comm_adjust(struct thread *self) { char *comm = self->comm; @@ -240,9 +284,9 @@ int event__process_lost(event_t *self, struct perf_session *session) int event__process_mmap(event_t *self, struct perf_session *session) { - struct thread *thread = perf_session__findnew(session, self->mmap.pid); - struct map *map = map__new(&self->mmap, MAP__FUNCTION, - session->cwd, session->cwdlen); + struct thread *thread; + struct map *map; + static const char kmmap_prefix[] = "[kernel.kallsyms."; dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", self->mmap.pid, self->mmap.tid, @@ -251,6 +295,20 @@ int event__process_mmap(event_t *self, struct perf_session *session) (void *)(long)self->mmap.pgoff, self->mmap.filename); + if (self->mmap.pid == 0 && + memcmp(self->mmap.filename, kmmap_prefix, + sizeof(kmmap_prefix) - 1) == 0) { + const char *symbol_name = (self->mmap.filename + + sizeof(kmmap_prefix) - 1); + perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name, + self->mmap.start); + return 0; + } + + thread = perf_session__findnew(session, self->mmap.pid); + map = map__new(&self->mmap, MAP__FUNCTION, + session->cwd, session->cwdlen); + if (thread == NULL || map == NULL) dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); else diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 80fb3653c809..61fc0dc658c2 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -110,6 +110,10 @@ int event__synthesize_thread(pid_t pid, void event__synthesize_threads(int (*process)(event_t *event, struct perf_session *session), struct perf_session *session); +int event__synthesize_kernel_mmap(int (*process)(event_t *event, + struct perf_session *session), + struct perf_session *session, + const char *symbol_name); int event__process_comm(event_t *self, struct perf_session *session); int event__process_lost(event_t *self, struct perf_session *session); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 7f0537d1add8..e0e6a075489e 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -401,3 +401,49 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg) return true; } + +int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, + const char *symbol_name, + u64 addr) +{ + char *bracket; + + self->ref_reloc_sym.name = strdup(symbol_name); + if (self->ref_reloc_sym.name == NULL) + return -ENOMEM; + + bracket = strchr(self->ref_reloc_sym.name, ']'); + if (bracket) + *bracket = '\0'; + + self->ref_reloc_sym.addr = addr; + return 0; +} + +static u64 map__reloc_map_ip(struct map *map, u64 ip) +{ + return ip + (s64)map->pgoff; +} + +static u64 map__reloc_unmap_ip(struct map *map, u64 ip) +{ + return ip - (s64)map->pgoff; +} + +void perf_session__reloc_vmlinux_maps(struct perf_session *self, + u64 unrelocated_addr) +{ + enum map_type type; + s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr; + + if (!reloc) + return; + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct map *map = self->vmlinux_maps[type]; + + map->map_ip = map__reloc_map_ip; + map->unmap_ip = 
map__reloc_unmap_ip; + map->pgoff = reloc; + } +} diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 8db37bbf0e62..d4a9d20f8d44 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -24,6 +24,10 @@ struct perf_session { unsigned long unknown_events; struct rb_root hists; u64 sample_type; + struct { + const char *name; + u64 addr; + } ref_reloc_sym; int fd; int cwdlen; char *cwd; @@ -59,4 +63,10 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg); int perf_header__read_build_ids(int input, u64 offset, u64 file_size); +int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, + const char *symbol_name, + u64 addr); +void perf_session__reloc_vmlinux_maps(struct perf_session *self, + u64 unrelocated_addr); + #endif /* __PERF_SESSION_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e290429e9c00..da2f07f1af8f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -956,11 +956,15 @@ static int dso__load_sym(struct dso *self, struct map *map, elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; - const char *elf_name; + const char *elf_name = elf_sym__name(&sym, symstrs); char *demangled = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; + if (kernel && session->ref_reloc_sym.name != NULL && + strcmp(elf_name, session->ref_reloc_sym.name) == 0) + perf_session__reloc_vmlinux_maps(session, sym.st_value); + if (!is_label && !elf_sym__is_a(&sym, map->type)) continue; @@ -973,7 +977,6 @@ static int dso__load_sym(struct dso *self, struct map *map, if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) continue; - elf_name = elf_sym__name(&sym, symstrs); section_name = elf_sec__name(&shdr, secstrs); if (kernel || kmodule) { -- cgit v1.2.3-58-ga151 From ec3a9039601af210fca4650d229621fe5a21df0b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:46:41 -0500 Subject: tracing/kprobe: Update example output in documentation Update example output in documentation according to current implementation. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <20100105224641.19431.34967.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- Documentation/trace/kprobetrace.txt | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 47aabeebbdf6..c3eff6ff945f 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -97,23 +97,24 @@ recording return value as "myretprobe" event. 
cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format name: myprobe -ID: 75 +ID: 780 format: - field:unsigned short common_type; offset:0; size:2; - field:unsigned char common_flags; offset:2; size:1; - field:unsigned char common_preempt_count; offset:3; size:1; - field:int common_pid; offset:4; size:4; - field:int common_tgid; offset:8; size:4; - - field: unsigned long ip; offset:16;tsize:8; - field: int nargs; offset:24;tsize:4; - field: unsigned long dfd; offset:32;tsize:8; - field: unsigned long filename; offset:40;tsize:8; - field: unsigned long flags; offset:48;tsize:8; - field: unsigned long mode; offset:56;tsize:8; - -print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode - + field:unsigned short common_type; offset:0; size:2; signed:0; + field:unsigned char common_flags; offset:2; size:1; signed:0; + field:unsigned char common_preempt_count; offset:3; size:1;signed:0; + field:int common_pid; offset:4; size:4; signed:1; + field:int common_lock_depth; offset:8; size:4; signed:1; + + field:unsigned long __probe_ip; offset:12; size:4; signed:0; + field:int __probe_nargs; offset:16; size:4; signed:1; + field:unsigned long dfd; offset:20; size:4; signed:0; + field:unsigned long filename; offset:24; size:4; signed:0; + field:unsigned long flags; offset:28; size:4; signed:0; + field:unsigned long mode; offset:32; size:4; signed:0; + + +print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->__probe_ip, +REC->dfd, REC->filename, REC->flags, REC->mode You can see that the event has 4 arguments as in the expressions you specified. -- cgit v1.2.3-58-ga151 From 14640106f243a3b29944d7198569090fa6546f2d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:46:48 -0500 Subject: tracing/kprobe: Drop function argument access syntax Drop function argument access syntax, because the function arguments depend on not only architecture but also compile-options and function API. And now, we have perf-probe for finding register/memory assigned to each argument. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Roland McGrath Cc: Oleg Nesterov Cc: Mahesh Salgaonkar Cc: Benjamin Herrenschmidt Cc: Michael Neuling Cc: linuxppc-dev@ozlabs.org LKML-Reference: <20100105224648.19431.52309.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- Documentation/trace/kprobetrace.txt | 21 ++++++++++----------- kernel/trace/trace_kprobe.c | 18 +----------------- 2 files changed, 11 insertions(+), 28 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index c3eff6ff945f..f30978e001f8 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -37,15 +37,12 @@ Synopsis of kprobe_events @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) $stackN : Fetch Nth entry of stack (N >= 0) $stack : Fetch stack address. - $argN : Fetch function argument. (N >= 0)(*) - $retval : Fetch return value.(**) - +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) + $retval : Fetch return value.(*) + +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**) NAME=FETCHARG: Set NAME as the argument name of FETCHARG. - (*) aN may not correct on asmlinkaged functions and at the middle of - function body. - (**) only for return probe. - (***) this is useful for fetching a field of data structures. 
+ (*) only for return probe. + (**) this is useful for fetching a field of data structures. Per-Probe Event Filtering @@ -82,11 +79,14 @@ Usage examples To add a probe as a new event, write a new definition to kprobe_events as below. - echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events + echo p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack) > /sys/kernel/debug/tracing/kprobe_events This sets a kprobe on the top of do_sys_open() function with recording -1st to 4th arguments as "myprobe" event. As this example shows, users can -choose more familiar names for each arguments. +1st to 4th arguments as "myprobe" event. Note, which register/stack entry is +assigned to each function argument depends on arch-specific ABI. If you unsure +the ABI, please try to use probe subcommand of perf-tools (you can find it +under tools/perf/). +As this example shows, users can choose more familiar names for each arguments. echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events @@ -147,4 +147,3 @@ events, you need to enable it. returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel returns from do_sys_open to sys_open+0x1b). - diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 47f54ab57b68..7ac728ded964 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) return retval; } -static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num) -{ - return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num)); -} - static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, void *dummy) { @@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) { int ret = -EINVAL; - if (ff->func == fetch_argument) - ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data); - else if (ff->func == fetch_register) { + if (ff->func == fetch_register) { const char *name; name = regs_query_register_name((unsigned int)((long)ff->data)); ret = snprintf(buf, n, "%%%s", name); @@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) } } else ret = -EINVAL; - } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) { - ret = strict_strtoul(arg + 3, 10, ¶m); - if (ret || param > PARAM_MAX_ARGS) - ret = -EINVAL; - else { - ff->func = fetch_argument; - ff->data = (void *)param; - } } else ret = -EINVAL; return ret; @@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv) * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] * Fetch args: - * $argN : fetch Nth of function argument. (N:0-) * $retval : fetch return value * $stack : fetch stack address * $stackN : fetch Nth of stack (N:0-) -- cgit v1.2.3-58-ga151 From aa5add93e92019018e905146f8c3d3f8e3c08300 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:46:56 -0500 Subject: x86/ptrace: Remove unused regs_get_argument_nth API Because of dropping function argument syntax from kprobe-tracer, we don't need this API anymore. 
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Roland McGrath Cc: Oleg Nesterov Cc: Mahesh Salgaonkar Cc: Benjamin Herrenschmidt Cc: Michael Neuling Cc: Steven Rostedt Cc: linuxppc-dev@ozlabs.org LKML-Reference: <20100105224656.19431.92588.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/ptrace.h | 4 ---- arch/x86/kernel/ptrace.c | 24 ------------------------ 2 files changed, 28 deletions(-) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 9d369f680321..20102808b191 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -274,10 +274,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, return 0; } -/* Get Nth argument at function call */ -extern unsigned long regs_get_argument_nth(struct pt_regs *regs, - unsigned int n); - /* * These are defined as per linux/ptrace.h, which see. */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 017d937639fe..73554a3aae8c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -140,30 +140,6 @@ static const int arg_offs_table[] = { #endif }; -/** - * regs_get_argument_nth() - get Nth argument at function call - * @regs: pt_regs which contains registers at function entry. - * @n: argument number. - * - * regs_get_argument_nth() returns @n th argument of a function call. - * Since usually the kernel stack will be changed right after function entry, - * you must use this at function entry. If the @n th entry is NOT in the - * kernel stack or pt_regs, this returns 0. - */ -unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) -{ - if (n < ARRAY_SIZE(arg_offs_table)) - return *(unsigned long *)((char *)regs + arg_offs_table[n]); - else { - /* - * The typical case: arg n is on the stack. - * (Note: stack[0] = return address, so skip it) - */ - n -= ARRAY_SIZE(arg_offs_table); - return regs_get_kernel_stack_nth(regs, 1 + n); - } -} - /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. -- cgit v1.2.3-58-ga151 From bbaa46fac6d1c652bfa6282420d36a44bdc53b64 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:47:03 -0500 Subject: perf probe: Remove newline from die() Remove newline from die(), because it is automatically added. 
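For context, the trailing newline is redundant because the die()/warning() helpers in perf append the '\n' themselves when printing the fatal message. A rough sketch of that behaviour, simplified for illustration and not the exact implementation in tools/perf:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified sketch of a die()-style helper that supplies the newline. */
static void report(const char *prefix, const char *err, va_list params)
{
	char msg[1024];

	vsnprintf(msg, sizeof(msg), err, params);
	fprintf(stderr, "%s%s\n", prefix, msg);	/* '\n' appended here */
}

void die(const char *err, ...)
{
	va_list params;

	va_start(params, err);
	report("fatal: ", err, params);
	va_end(params);
	exit(128);
}

With the newline supplied by the helper, a format string that itself ends in "\n" prints an extra blank line, which is what this patch cleans up.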
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100105224703.19431.42475.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-finder.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 4b852c0d16a5..6402798337c8 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -402,11 +402,11 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) } else if (op == DW_OP_regx) { regn = loc->lr_number; } else - die("Dwarf_OP %d is not supported.\n", op); + die("Dwarf_OP %d is not supported.", op); regs = get_arch_regstr(regn); if (!regs) - die("%lld exceeds max register number.\n", regn); + die("%lld exceeds max register number.", regn); if (deref) ret = snprintf(pf->buf, pf->len, @@ -438,7 +438,7 @@ static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) return ; error: die("Failed to find the location of %s at this address.\n" - " Perhaps, it has been optimized out.\n", pf->var); + " Perhaps, it has been optimized out.", pf->var); } static int variable_callback(struct die_link *dlink, void *data) @@ -476,7 +476,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) /* Search child die for local variables and parameters. */ ret = search_die_from_children(sp_die, variable_callback, pf); if (!ret) - die("Failed to find '%s' in this function.\n", pf->var); + die("Failed to find '%s' in this function.", pf->var); } /* Get a frame base on the address */ @@ -602,7 +602,7 @@ static void find_by_line(struct probe_finder *pf) ret = search_die_from_children(pf->cu_die, probeaddr_callback, pf); if (ret == 0) - die("Probe point is not found in subprograms.\n"); + die("Probe point is not found in subprograms."); /* Continuing, because target line might be inlined. */ } dwarf_srclines_dealloc(__dw_debug, lines, cnt); @@ -661,7 +661,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) !die_inlined_subprogram(lk->die)) goto found; } - die("Failed to find real subprogram.\n"); + die("Failed to find real subprogram."); found: /* Get offset from subprogram */ ret = die_within_subprogram(lk->die, pf->addr, &offs); -- cgit v1.2.3-58-ga151 From 72041334b8c75ae7e1da2f17ba2b7afee8f2abd7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:47:10 -0500 Subject: perf probe: Show probe list in pager Show probe list in pager, because the list can be longer than a page. 
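setup_pager() belongs to the command infrastructure perf shares with git: when stdout is a terminal it forks a pager process and redirects the command's output into it, so long listings can be scrolled. A simplified sketch of the idea (illustrative only, not the actual tools/perf/util/pager.c):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Simplified pager setup: pipe stdout into $PAGER (or less). */
static void setup_pager_sketch(void)
{
	const char *pager = getenv("PAGER");
	int fds[2];

	if (!isatty(STDOUT_FILENO))
		return;				/* only page interactive output */
	if (pager == NULL || *pager == '\0')
		pager = "less";

	if (pipe(fds) < 0)
		return;

	if (fork() == 0) {
		/* child: read from the pipe and exec the pager */
		dup2(fds[0], STDIN_FILENO);
		close(fds[0]);
		close(fds[1]);
		execlp(pager, pager, (char *)NULL);
		exit(1);
	}

	/* parent: from now on stdout goes to the pager */
	dup2(fds[1], STDOUT_FILENO);
	close(fds[0]);
	close(fds[1]);
}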
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100105224710.19431.61542.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-event.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 8e532d9824f0..a22141a773bc 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -37,6 +37,7 @@ #include "string.h" #include "strlist.h" #include "debug.h" +#include "cache.h" #include "parse-events.h" /* For debugfs_path */ #include "probe-event.h" @@ -455,6 +456,8 @@ void show_perf_probe_events(void) struct strlist *rawlist; struct str_node *ent; + setup_pager(); + fd = open_kprobe_events(O_RDONLY, 0); rawlist = get_trace_kprobe_event_rawlist(fd); close(fd); -- cgit v1.2.3-58-ga151 From fb1d2edf7ee25a26ad0b238d0ee335a3b28b7aa3 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:47:17 -0500 Subject: perf tools: Support tracepoint glob matching Support glob wildcard when selecting tracepoint events by -e option. Without this patch, perf-tools supports 'GROUP:*:record' syntax for selecting all tracepoints under GROUP group. With this patch, user can choose tracepoints more flexibly by using partial wildcards, e.g. 'block:*bio*:record'. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100105224717.19431.68972.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 609d5a9470c5..05d0c5c2030c 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -450,7 +450,8 @@ parse_single_tracepoint_event(char *sys_name, /* sys + ':' + event + ':' + flags*/ #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) static enum event_result -parse_subsystem_tracepoint_event(char *sys_name, char *flags) +parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp, + char *flags) { char evt_path[MAXPATHLEN]; struct dirent *evt_ent; @@ -474,6 +475,9 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags) || !strcmp(evt_ent->d_name, "filter")) continue; + if (!strglobmatch(evt_ent->d_name, evt_exp)) + continue; + len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name, evt_ent->d_name, flags ? 
":" : "", flags ?: ""); @@ -522,9 +526,10 @@ static enum event_result parse_tracepoint_event(const char **strp, if (evt_length >= MAX_EVENT_LENGTH) return EVT_FAILED; - if (!strcmp(evt_name, "*")) { + if (strpbrk(evt_name, "*?")) { *strp = evt_name + evt_length; - return parse_subsystem_tracepoint_event(sys_name, flags); + return parse_multiple_tracepoint_event(sys_name, evt_name, + flags); } else return parse_single_tracepoint_event(sys_name, evt_name, evt_length, flags, -- cgit v1.2.3-58-ga151 From 6964cd2c8efe6e048401f1fe3952a06c563c34c1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 5 Jan 2010 17:47:24 -0500 Subject: perf tools: Enhance glob string matching Enhance strglobmatch() for supporting character classes([CHARS], complementation and ranges are also supported) and escaped special characters (\*, \? etc). Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100105224724.19431.56271.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/string.c | 65 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 5352d7dccc61..c397d4f6f748 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -227,16 +227,73 @@ fail: return NULL; } -/* Glob expression pattern matching */ +/* Character class matching */ +static bool __match_charclass(const char *pat, char c, const char **npat) +{ + bool complement = false, ret = true; + + if (*pat == '!') { + complement = true; + pat++; + } + if (*pat++ == c) /* First character is special */ + goto end; + + while (*pat && *pat != ']') { /* Matching */ + if (*pat == '-' && *(pat + 1) != ']') { /* Range */ + if (*(pat - 1) <= c && c <= *(pat + 1)) + goto end; + if (*(pat - 1) > *(pat + 1)) + goto error; + pat += 2; + } else if (*pat++ == c) + goto end; + } + if (!*pat) + goto error; + ret = false; + +end: + while (*pat && *pat != ']') /* Searching closing */ + pat++; + if (!*pat) + goto error; + *npat = pat + 1; + return complement ? !ret : ret; + +error: + return false; +} + +/** + * strglobmatch - glob expression pattern matching + * @str: the target string to match + * @pat: the pattern string to match + * + * This returns true if the @str matches @pat. @pat can includes wildcards + * ('*','?') and character classes ([CHARS], complementation and ranges are + * also supported). Also, this supports escape character ('\') to use special + * characters as normal character. + * + * Note: if @pat syntax is broken, this always returns false. 
+ */ bool strglobmatch(const char *str, const char *pat) { while (*str && *pat && *pat != '*') { - if (*pat == '?') { + if (*pat == '?') { /* Matches any single character */ str++; pat++; - } else - if (*str++ != *pat++) + continue; + } else if (*pat == '[') /* Character classes/Ranges */ + if (__match_charclass(pat + 1, *str, &pat)) { + str++; + continue; + } else return false; + else if (*pat == '\\') /* Escaped char match as normal char */ + pat++; + if (*str++ != *pat++) + return false; } /* Check wild card */ if (*pat == '*') { -- cgit v1.2.3-58-ga151 From 631c9def804b2c92b5cca04fb9ff7b5df9e35094 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 6 Jan 2010 09:45:34 -0500 Subject: perf probe: Support --line option to show probable source-code lines Add --line option to support showing probable source-code lines. perf probe --line SRC:LN[-LN|+NUM] or perf probe --line FUNC[:LN[-LN|+NUM]] This option shows source-code with line number if the line can be probed. Lines without line number (and blue color) means that the line can not be probed, because debuginfo doesn't have the information of those lines. The argument specifies the range of lines, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function. e.g. # ./perf probe --line kernel/sched.c:1080 * * called with rq->lock held and irqs disabled */ static void hrtick_start(struct rq *rq, u64 delay) { struct hrtimer *timer = &rq->hrtick_timer; 1086 ktime_t time = ktime_add_ns(timer->base->get_time(), delay); hrtimer_set_expires(timer, time); 1090 if (rq == this_rq()) { 1091 hrtimer_restart(timer); 1092 } else if (!rq->hrtick_csd_pending) { 1093 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 1094 rq->hrtick_csd_pending = 1; If you specifying function name, this shows function-relative line number. # ./perf probe --line schedule asmlinkage void __sched schedule(void) 1 { struct task_struct *prev, *next; unsigned long *switch_count; struct rq *rq; int cpu; need_resched: preempt_disable(); 9 cpu = smp_processor_id(); 10 rq = cpu_rq(cpu); 11 rcu_sched_qs(cpu); 12 prev = rq->curr; 13 switch_count = &prev->nivcsw; Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20100106144534.27218.77939.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-probe.txt | 20 ++++ tools/perf/builtin-probe.c | 76 ++++++++++--- tools/perf/util/probe-event.c | 100 +++++++++++++++++ tools/perf/util/probe-event.h | 2 + tools/perf/util/probe-finder.c | 191 +++++++++++++++++++++++++++++++- tools/perf/util/probe-finder.h | 31 ++++++ 6 files changed, 402 insertions(+), 18 deletions(-) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 250e391b4bc8..2de34075f6a4 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -15,6 +15,8 @@ or 'perf probe' [options] --del='[GROUP:]EVENT' [...] or 'perf probe' --list +or +'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]' DESCRIPTION ----------- @@ -45,6 +47,11 @@ OPTIONS --list:: List up current probe events. +-L:: +--line=:: + Show source code lines which can be probed. This needs an argument + which specifies a range of the source code. 
+ PROBE SYNTAX ------------ Probe points are defined by following syntax. @@ -56,6 +63,19 @@ Probe points are defined by following syntax. It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc). +LINE SYNTAX +----------- +Line range is descripted by following syntax. + + "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]" + +FUNC specifies the function name of showing lines. 'RLN' is the start line +number from function entry line, and 'RLN2' is the end line number. As same as +probe syntax, 'SRC' means the source file path, 'ALN' is start line number, +and 'ALN2' is end line number in the file. It is also possible to specify how +many lines to show by using 'NUM'. +So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function. + SEE ALSO -------- linkperf:perf-trace[1], linkperf:perf-record[1] diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index ffdd3fe87b4a..1d3a99ea5ce1 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -55,11 +55,13 @@ static struct { bool need_dwarf; bool list_events; bool force_add; + bool show_lines; int nr_probe; struct probe_point probes[MAX_PROBES]; struct strlist *dellist; struct perf_session *psession; struct map *kmap; + struct line_range line_range; } session; @@ -116,6 +118,15 @@ static int opt_del_probe_event(const struct option *opt __used, return 0; } +static int opt_show_lines(const struct option *opt __used, + const char *str, int unset __used) +{ + if (str) + parse_line_range_desc(str, &session.line_range); + INIT_LIST_HEAD(&session.line_range.line_list); + session.show_lines = true; + return 0; +} /* Currently just checking function name from symbol map */ static void evaluate_probe_point(struct probe_point *pp) { @@ -144,6 +155,7 @@ static const char * const probe_usage[] = { "perf probe [] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", "perf probe [] --del '[GROUP:]EVENT' ...", "perf probe --list", + "perf probe --line 'LINEDESC'", NULL }; @@ -182,9 +194,32 @@ static const struct option options[] = { opt_add_probe_event), OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events" " with existing name"), +#ifndef NO_LIBDWARF + OPT_CALLBACK('L', "line", NULL, + "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]", + "Show source code lines.", opt_show_lines), +#endif OPT_END() }; +/* Initialize symbol maps for vmlinux */ +static void init_vmlinux(void) +{ + symbol_conf.sort_by_name = true; + if (symbol_conf.vmlinux_name == NULL) + symbol_conf.try_vmlinux_path = true; + else + pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name); + if (symbol__init() < 0) + die("Failed to init symbol map."); + session.psession = perf_session__new(NULL, O_WRONLY, false); + if (session.psession == NULL) + die("Failed to init perf_session."); + session.kmap = session.psession->vmlinux_maps[MAP__FUNCTION]; + if (!session.kmap) + die("Could not find kernel map.\n"); +} + int cmd_probe(int argc, const char **argv, const char *prefix __used) { int i, ret; @@ -203,7 +238,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) parse_probe_event_argv(argc, argv); } - if ((!session.nr_probe && !session.dellist && !session.list_events)) + if ((!session.nr_probe && !session.dellist && 
!session.list_events && + !session.show_lines)) usage_with_options(probe_usage, options); if (debugfs_valid_mountpoint(debugfs_path) < 0) @@ -215,10 +251,34 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) " --add/--del.\n"); usage_with_options(probe_usage, options); } + if (session.show_lines) { + pr_warning(" Error: Don't use --list with --line.\n"); + usage_with_options(probe_usage, options); + } show_perf_probe_events(); return 0; } +#ifndef NO_LIBDWARF + if (session.show_lines) { + if (session.nr_probe != 0 || session.dellist) { + pr_warning(" Error: Don't use --line with" + " --add/--del.\n"); + usage_with_options(probe_usage, options); + } + init_vmlinux(); + fd = open_vmlinux(); + if (fd < 0) + die("Could not open debuginfo file."); + ret = find_line_range(fd, &session.line_range); + if (ret <= 0) + die("Source line is not found.\n"); + close(fd); + show_line_range(&session.line_range); + return 0; + } +#endif + if (session.dellist) { del_trace_kprobe_events(session.dellist); strlist__delete(session.dellist); @@ -226,18 +286,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) return 0; } - /* Initialize symbol maps for vmlinux */ - symbol_conf.sort_by_name = true; - if (symbol_conf.vmlinux_name == NULL) - symbol_conf.try_vmlinux_path = true; - if (symbol__init() < 0) - die("Failed to init symbol map."); - session.psession = perf_session__new(NULL, O_WRONLY, false); - if (session.psession == NULL) - die("Failed to init perf_session."); - session.kmap = session.psession->vmlinux_maps[MAP__FUNCTION]; - if (!session.kmap) - die("Could not find kernel map.\n"); + /* Add probes */ + init_vmlinux(); if (session.need_dwarf) #ifdef NO_LIBDWARF diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index a22141a773bc..71b0dd590a37 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -38,6 +38,7 @@ #include "strlist.h" #include "debug.h" #include "cache.h" +#include "color.h" #include "parse-events.h" /* For debugfs_path */ #include "probe-event.h" @@ -63,6 +64,42 @@ static int e_snprintf(char *str, size_t size, const char *format, ...) 
return ret; } +void parse_line_range_desc(const char *arg, struct line_range *lr) +{ + const char *ptr; + char *tmp; + /* + * + * SRC:SLN[+NUM|-ELN] + * FUNC[:SLN[+NUM|-ELN]] + */ + ptr = strchr(arg, ':'); + if (ptr) { + lr->start = (unsigned int)strtoul(ptr + 1, &tmp, 0); + if (*tmp == '+') + lr->end = lr->start + (unsigned int)strtoul(tmp + 1, + &tmp, 0); + else if (*tmp == '-') + lr->end = (unsigned int)strtoul(tmp + 1, &tmp, 0); + else + lr->end = 0; + pr_debug("Line range is %u to %u\n", lr->start, lr->end); + if (lr->end && lr->start > lr->end) + semantic_error("Start line must be smaller" + " than end line."); + if (*tmp != '\0') + semantic_error("Tailing with invalid character '%d'.", + *tmp); + tmp = strndup(arg, (ptr - arg)); + } else + tmp = strdup(arg); + + if (strchr(tmp, '.')) + lr->file = tmp; + else + lr->function = tmp; +} + /* Check the name is good for event/group */ static bool check_event_name(const char *name) { @@ -678,3 +715,66 @@ void del_trace_kprobe_events(struct strlist *dellist) close(fd); } +#define LINEBUF_SIZE 256 + +static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num) +{ + char buf[LINEBUF_SIZE]; + const char *color = PERF_COLOR_BLUE; + + if (fgets(buf, LINEBUF_SIZE, fp) == NULL) + goto error; + if (!skip) { + if (show_num) + fprintf(stdout, "%7u %s", l, buf); + else + color_fprintf(stdout, color, " %s", buf); + } + + while (strlen(buf) == LINEBUF_SIZE - 1 && + buf[LINEBUF_SIZE - 2] != '\n') { + if (fgets(buf, LINEBUF_SIZE, fp) == NULL) + goto error; + if (!skip) { + if (show_num) + fprintf(stdout, "%s", buf); + else + color_fprintf(stdout, color, "%s", buf); + } + } + return; +error: + if (feof(fp)) + die("Source file is shorter than expected."); + else + die("File read error: %s", strerror(errno)); +} + +void show_line_range(struct line_range *lr) +{ + unsigned int l = 1; + struct line_node *ln; + FILE *fp; + + setup_pager(); + + if (lr->function) + fprintf(stdout, "<%s:%d>\n", lr->function, + lr->start - lr->offset); + else + fprintf(stdout, "<%s:%d>\n", lr->file, lr->start); + + fp = fopen(lr->path, "r"); + if (fp == NULL) + die("Failed to open %s: %s", lr->path, strerror(errno)); + /* Skip to starting line number */ + while (l < lr->start) + show_one_line(fp, l++, true, false); + + list_for_each_entry(ln, &lr->line_list, list) { + while (ln->line > l) + show_one_line(fp, (l++) - lr->offset, false, false); + show_one_line(fp, (l++) - lr->offset, false, true); + } + fclose(fp); +} diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 7f1d499118c0..711287d4baea 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -5,6 +5,7 @@ #include "probe-finder.h" #include "strlist.h" +extern void parse_line_range_desc(const char *arg, struct line_range *lr); extern void parse_perf_probe_event(const char *str, struct probe_point *pp, bool *need_dwarf); extern int synthesize_perf_probe_point(struct probe_point *pp); @@ -15,6 +16,7 @@ extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes, bool force_add); extern void del_trace_kprobe_events(struct strlist *dellist); extern void show_perf_probe_events(void); +extern void show_line_range(struct line_range *lr); /* Maximum index number of event-name postfix */ #define MAX_EVENT_INDEX 1024 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 6402798337c8..1b2124d12f68 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -140,6 +140,31 @@ static Dwarf_Unsigned 
cu_find_fileno(Dwarf_Die cu_die, const char *fname) return found; } +static int cu_get_filename(Dwarf_Die cu_die, Dwarf_Unsigned fno, char **buf) +{ + Dwarf_Signed cnt, i; + char **srcs; + int ret = 0; + + if (!buf || !fno) + return -EINVAL; + + ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error); + if (ret == DW_DLV_OK) { + if ((Dwarf_Unsigned)cnt > fno - 1) { + *buf = strdup(srcs[fno - 1]); + ret = 0; + pr_debug("found filename: %s\n", *buf); + } else + ret = -ENOENT; + for (i = 0; i < cnt; i++) + dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); + dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST); + } else + ret = -EINVAL; + return ret; +} + /* Compare diename and tname */ static int die_compare_name(Dwarf_Die dw_die, const char *tname) { @@ -567,7 +592,7 @@ static int probeaddr_callback(struct die_link *dlink, void *data) } /* Find probe point from its line number */ -static void find_by_line(struct probe_finder *pf) +static void find_probe_point_by_line(struct probe_finder *pf) { Dwarf_Signed cnt, i, clm; Dwarf_Line *lines; @@ -626,7 +651,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) pf->fno = die_get_decl_file(dlink->die); pf->lno = die_get_decl_line(dlink->die) + pp->line; - find_by_line(pf); + find_probe_point_by_line(pf); return 1; } if (die_inlined_subprogram(dlink->die)) { @@ -673,7 +698,7 @@ found: return 0; } -static void find_by_func(struct probe_finder *pf) +static void find_probe_point_by_func(struct probe_finder *pf) { search_die_from_children(pf->cu_die, probefunc_callback, pf); } @@ -714,10 +739,10 @@ int find_probepoint(int fd, struct probe_point *pp) if (ret == DW_DLV_NO_ENTRY) pf.cu_base = 0; if (pp->function) - find_by_func(&pf); + find_probe_point_by_func(&pf); else { pf.lno = pp->line; - find_by_line(&pf); + find_probe_point_by_line(&pf); } } dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE); @@ -728,3 +753,159 @@ int find_probepoint(int fd, struct probe_point *pp) return pp->found; } + +static void line_range_add_line(struct line_range *lr, unsigned int line) +{ + struct line_node *ln; + struct list_head *p; + + /* Reverse search, because new line will be the last one */ + list_for_each_entry_reverse(ln, &lr->line_list, list) { + if (ln->line < line) { + p = &ln->list; + goto found; + } else if (ln->line == line) /* Already exist */ + return ; + } + /* List is empty, or the smallest entry */ + p = &lr->line_list; +found: + pr_debug("Debug: add a line %u\n", line); + ln = zalloc(sizeof(struct line_node)); + DIE_IF(ln == NULL); + ln->line = line; + INIT_LIST_HEAD(&ln->list); + list_add(&ln->list, p); +} + +/* Find line range from its line number */ +static void find_line_range_by_line(struct line_finder *lf) +{ + Dwarf_Signed cnt, i; + Dwarf_Line *lines; + Dwarf_Unsigned lineno = 0; + Dwarf_Unsigned fno; + Dwarf_Addr addr; + int ret; + + ret = dwarf_srclines(lf->cu_die, &lines, &cnt, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + + for (i = 0; i < cnt; i++) { + ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + if (fno != lf->fno) + continue; + + ret = dwarf_lineno(lines[i], &lineno, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + if (lf->lno_s > lineno || lf->lno_e < lineno) + continue; + + /* Filter line in the function address range */ + if (lf->addr_s && lf->addr_e) { + ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + if (lf->addr_s > addr || lf->addr_e <= addr) + continue; + } + line_range_add_line(lf->lr, (unsigned int)lineno); + } + dwarf_srclines_dealloc(__dw_debug, lines, 
cnt); + if (!list_empty(&lf->lr->line_list)) + lf->found = 1; +} + +/* Search function from function name */ +static int linefunc_callback(struct die_link *dlink, void *data) +{ + struct line_finder *lf = (struct line_finder *)data; + struct line_range *lr = lf->lr; + Dwarf_Half tag; + int ret; + + ret = dwarf_tag(dlink->die, &tag, &__dw_error); + DIE_IF(ret == DW_DLV_ERROR); + if (tag == DW_TAG_subprogram && + die_compare_name(dlink->die, lr->function) == 0) { + /* Get the address range of this function */ + ret = dwarf_highpc(dlink->die, &lf->addr_e, &__dw_error); + if (ret == DW_DLV_OK) + ret = dwarf_lowpc(dlink->die, &lf->addr_s, &__dw_error); + DIE_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_NO_ENTRY) { + lf->addr_s = 0; + lf->addr_e = 0; + } + + lf->fno = die_get_decl_file(dlink->die); + lr->offset = die_get_decl_line(dlink->die);; + lf->lno_s = lr->offset + lr->start; + if (!lr->end) + lf->lno_e = (Dwarf_Unsigned)-1; + else + lf->lno_e = lr->offset + lr->end; + lr->start = lf->lno_s; + lr->end = lf->lno_e; + find_line_range_by_line(lf); + /* If we find a target function, this should be end. */ + lf->found = 1; + return 1; + } + return 0; +} + +static void find_line_range_by_func(struct line_finder *lf) +{ + search_die_from_children(lf->cu_die, linefunc_callback, lf); +} + +int find_line_range(int fd, struct line_range *lr) +{ + Dwarf_Half addr_size = 0; + Dwarf_Unsigned next_cuh = 0; + int ret; + struct line_finder lf = {.lr = lr}; + + ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); + if (ret != DW_DLV_OK) + return -ENOENT; + + while (!lf.found) { + /* Search CU (Compilation Unit) */ + ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, + &addr_size, &next_cuh, &__dw_error); + DIE_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_NO_ENTRY) + break; + + /* Get the DIE(Debugging Information Entry) of this CU */ + ret = dwarf_siblingof(__dw_debug, 0, &lf.cu_die, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + + /* Check if target file is included. 
*/ + if (lr->file) + lf.fno = cu_find_fileno(lf.cu_die, lr->file); + + if (!lr->file || lf.fno) { + if (lr->function) + find_line_range_by_func(&lf); + else { + lf.lno_s = lr->start; + if (!lr->end) + lf.lno_e = (Dwarf_Unsigned)-1; + else + lf.lno_e = lr->end; + find_line_range_by_line(&lf); + } + /* Get the real file path */ + if (lf.found) + cu_get_filename(lf.cu_die, lf.fno, &lr->path); + } + dwarf_dealloc(__dw_debug, lf.cu_die, DW_DLA_DIE); + } + ret = dwarf_finish(__dw_debug, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + return lf.found; +} + diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index e3f396806e6e..972b386116f1 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -34,8 +34,26 @@ struct probe_point { char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ }; +/* Line number container */ +struct line_node { + struct list_head list; + unsigned int line; +}; + +/* Line range */ +struct line_range { + char *file; /* File name */ + char *function; /* Function name */ + unsigned int start; /* Start line number */ + unsigned int end; /* End line number */ + unsigned int offset; /* Start line offset */ + char *path; /* Real path name */ + struct list_head line_list; /* Visible lines */ +}; + #ifndef NO_LIBDWARF extern int find_probepoint(int fd, struct probe_point *pp); +extern int find_line_range(int fd, struct line_range *lr); /* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */ #ifndef _MIPS_SZLONG @@ -62,6 +80,19 @@ struct probe_finder { char *buf; /* Current output buffer */ int len; /* Length of output buffer */ }; + +struct line_finder { + struct line_range *lr; /* Target line range */ + + Dwarf_Unsigned fno; /* File number */ + Dwarf_Unsigned lno_s; /* Start line number */ + Dwarf_Unsigned lno_e; /* End line number */ + Dwarf_Addr addr_s; /* Start address */ + Dwarf_Addr addr_e; /* End address */ + Dwarf_Die cu_die; /* Current CU */ + int found; +}; + #endif /* NO_LIBDWARF */ #endif /*_PROBE_FINDER_H */ -- cgit v1.2.3-58-ga151 From 8d9e503928638fc95317be42c416fb7907322aff Mon Sep 17 00:00:00 2001 From: Alexander Beregalov Date: Thu, 7 Jan 2010 19:40:47 +0300 Subject: perf: Fix memory leak: counterwidth Signed-off-by: Alexander Beregalov Cc: a.p.zijlstra@chello.nl Cc: paulus@samba.org LKML-Reference: <1262882447-23776-2-git-send-email-a.beregalov@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/values.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c index 1c15e39f99e3..cfa55d686e3b 100644 --- a/tools/perf/util/values.c +++ b/tools/perf/util/values.c @@ -169,6 +169,7 @@ static void perf_read_values__display_pretty(FILE *fp, counterwidth[j], values->value[i][j]); fprintf(fp, "\n"); } + free(counterwidth); } static void perf_read_values__display_raw(FILE *fp, -- cgit v1.2.3-58-ga151 From fed5af61dc0d9402d26e7fb8fb9731a60a8e05ca Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 7 Jan 2010 19:59:38 -0200 Subject: perf buildid-list: No need to process the header sections again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As it is already processed by: perf_session__new perf_session__open perf_session__read This was harmless, because we use dsos__findnew, that would already find it, but is unnecessary work and removing it makes builtin-buildid-list.c even shorter. 
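Each line that ends up on stdout is just the raw 20-byte build-id rendered as 40 hex characters followed by the DSO path. A minimal standalone sketch of that rendering step (the helper name and buffer handling here are illustrative, not the perf function):

#include <stdio.h>

/* Illustrative only: turn a raw build-id into the hex string printed
 * next to each DSO by dsos__fprintf_buildid(). */
static void sbuild_id__sprintf(const unsigned char *build_id, size_t len,
			       char *bf)
{
	size_t i;

	for (i = 0; i < len; ++i)
		sprintf(bf + i * 2, "%02x", build_id[i]);
}

/* Usage: char sbuild_id[2 * 20 + 1]; sbuild_id__sprintf(id, 20, sbuild_id); */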
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262901583-8074-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-buildid-list.c | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 1e99ac806913..4229c2c213cc 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -31,26 +31,6 @@ static const struct option options[] = { OPT_END() }; -static int perf_file_section__process_buildids(struct perf_file_section *self, - int feat, int fd) -{ - if (feat != HEADER_BUILD_ID) - return 0; - - if (lseek(fd, self->offset, SEEK_SET) < 0) { - pr_warning("Failed to lseek to %Ld offset for buildids!\n", - self->offset); - return -1; - } - - if (perf_header__read_build_ids(fd, self->offset, self->size)) { - pr_warning("Failed to read buildids!\n"); - return -1; - } - - return 0; -} - static int __cmd_buildid_list(void) { int err = -1; @@ -60,10 +40,7 @@ static int __cmd_buildid_list(void) if (session == NULL) return -1; - err = perf_header__process_sections(&session->header, session->fd, - perf_file_section__process_buildids); - if (err >= 0) - dsos__fprintf_buildid(stdout); + dsos__fprintf_buildid(stdout); perf_session__delete(session); return err; -- cgit v1.2.3-58-ga151 From a89e5abe3efcc7facc666d3985769278937f86b0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 7 Jan 2010 19:59:39 -0200 Subject: perf symbols: Record the domain of DSOs in HEADER_BUILD_ID header table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that we can restore them to the right DSO list (either dsos__kernel or dsos__user). We do that just like the kernel does for the other events, encoding PERF_RECORD_MISC_{KERNEL,USER} in perf_event_header. 
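The on-disk record being extended is a perf_event_header followed by the raw build-id and the file name. A hedged layout sketch (the header field widths follow the perf ABI; the struct name and the exact padding here are assumptions, see the perf headers for the real definition):

#include <stdint.h>

/* Illustrative only; not the real perf declaration. */
struct build_id_event_sketch {
	struct {
		uint32_t type;
		uint16_t misc;	/* now set to PERF_RECORD_MISC_KERNEL or _USER */
		uint16_t size;	/* whole record, filename included */
	} header;
	uint8_t  build_id[24];	/* 20-byte SHA-1, assumed padded to a u64 boundary */
	char	 filename[];	/* NUL-terminated, NAME_ALIGN-padded path */
};

On the read side, perf_header__read_build_ids() then only has to test header.misc to decide between dsos__kernel and dsos__user, as the session.c hunk below shows.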
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262901583-8074-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 9 ++++++--- tools/perf/util/session.c | 6 +++++- tools/perf/util/symbol.c | 6 +++--- tools/perf/util/symbol.h | 11 +++++++++-- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 942f7da8bf84..ec96321eb9e4 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -193,7 +193,7 @@ static int write_padded(int fd, const void *bf, size_t count, continue; \ else -static int __dsos__write_buildid_table(struct list_head *head, int fd) +static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd) { struct dso *pos; @@ -205,6 +205,7 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd) len = ALIGN(len, NAME_ALIGN); memset(&b, 0, sizeof(b)); memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); + b.header.misc = misc; b.header.size = sizeof(b) + len; err = do_write(fd, &b, sizeof(b)); if (err < 0) @@ -220,9 +221,11 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd) static int dsos__write_buildid_table(int fd) { - int err = __dsos__write_buildid_table(&dsos__kernel, fd); + int err = __dsos__write_buildid_table(&dsos__kernel, + PERF_RECORD_MISC_KERNEL, fd); if (err == 0) - err = __dsos__write_buildid_table(&dsos__user, fd); + err = __dsos__write_buildid_table(&dsos__user, + PERF_RECORD_MISC_USER, fd); return err; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e0e6a075489e..378ac5422bcf 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -255,6 +255,7 @@ int perf_header__read_build_ids(int input, u64 offset, u64 size) while (offset < limit) { struct dso *dso; ssize_t len; + struct list_head *head = &dsos__user; if (read(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; @@ -263,7 +264,10 @@ int perf_header__read_build_ids(int input, u64 offset, u64 size) if (read(input, filename, len) != len) goto out; - dso = dsos__findnew(filename); + if (bev.header.misc & PERF_RECORD_MISC_KERNEL) + head = &dsos__kernel; + + dso = __dsos__findnew(head, filename); if (dso != NULL) dso__set_build_id(dso, &bev.build_id); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index da2f07f1af8f..8e6627e6b778 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1615,14 +1615,14 @@ static struct dso *dsos__find(struct list_head *head, const char *name) return NULL; } -struct dso *dsos__findnew(const char *name) +struct dso *__dsos__findnew(struct list_head *head, const char *name) { - struct dso *dso = dsos__find(&dsos__user, name); + struct dso *dso = dsos__find(head, name); if (!dso) { dso = dso__new(name); if (dso != NULL) { - dsos__add(&dsos__user, dso); + dsos__add(head, dso); dso__set_basename(dso); } } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b2b5330a82a0..ee0b4593db7b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -115,9 +115,17 @@ bool dso__sorted_by_name(const struct dso *self, enum map_type type); void dso__sort_by_name(struct dso *self, enum map_type type); +extern struct list_head dsos__user, dsos__kernel; + +struct dso *__dsos__findnew(struct list_head *head, const char *name); + +static inline struct dso *dsos__findnew(const char *name) +{ + return __dsos__findnew(&dsos__user, 
name); +} + struct perf_session; -struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, struct perf_session *session, symbol_filter_t filter); void dsos__fprintf(FILE *fp); @@ -143,6 +151,5 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type); int perf_session__create_kernel_maps(struct perf_session *self); -extern struct list_head dsos__user, dsos__kernel; extern struct dso *vdso; #endif /* __PERF_SYMBOL */ -- cgit v1.2.3-58-ga151 From cf5531148ff34938840d6da775c0a4ace442d573 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 7 Jan 2010 19:59:40 -0200 Subject: perf tools: Create typedef for common event synthesizing callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1262901583-8074-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 16 +++++----------- tools/perf/util/event.h | 12 +++++------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1a31feb9999f..bfb3d872b9f5 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -8,8 +8,7 @@ #include "thread.h" static pid_t event__synthesize_comm(pid_t pid, int full, - int (*process)(event_t *event, - struct perf_session *session), + event__handler_t process, struct perf_session *session) { event_t ev; @@ -91,8 +90,7 @@ out_failure: } static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, - int (*process)(event_t *event, - struct perf_session *session), + event__handler_t process, struct perf_session *session) { char filename[PATH_MAX]; @@ -156,9 +154,7 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, return 0; } -int event__synthesize_thread(pid_t pid, - int (*process)(event_t *event, - struct perf_session *session), +int event__synthesize_thread(pid_t pid, event__handler_t process, struct perf_session *session) { pid_t tgid = event__synthesize_comm(pid, 1, process, session); @@ -167,8 +163,7 @@ int event__synthesize_thread(pid_t pid, return event__synthesize_mmap_events(pid, tgid, process, session); } -void event__synthesize_threads(int (*process)(event_t *event, - struct perf_session *session), +void event__synthesize_threads(event__handler_t process, struct perf_session *session) { DIR *proc; @@ -205,8 +200,7 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start) return 1; } -int event__synthesize_kernel_mmap(int (*process)(event_t *event, - struct perf_session *session), +int event__synthesize_kernel_mmap(event__handler_t process, struct perf_session *session, const char *symbol_name) { diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 61fc0dc658c2..80356da8216c 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -103,15 +103,13 @@ void event__print_totals(void); struct perf_session; -int event__synthesize_thread(pid_t pid, - int (*process)(event_t *event, - struct perf_session *session), +typedef int (*event__handler_t)(event_t *event, struct perf_session *session); + +int event__synthesize_thread(pid_t pid, event__handler_t process, struct perf_session *session); -void event__synthesize_threads(int (*process)(event_t *event, - struct perf_session *session), +void event__synthesize_threads(event__handler_t process, struct perf_session *session); -int 
event__synthesize_kernel_mmap(int (*process)(event_t *event, - struct perf_session *session), +int event__synthesize_kernel_mmap(event__handler_t process, struct perf_session *session, const char *symbol_name); -- cgit v1.2.3-58-ga151 From 8381f65d097dad90416808314737dd7d3ae38ea9 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Fri, 8 Jan 2010 15:27:33 +0000 Subject: sched/perf: Make sure irqs are disabled for perf_event_task_sched_in() perf_event_task_sched_in() expects interrupts to be disabled, but on architectures with __ARCH_WANT_INTERRUPTS_ON_CTXSW defined, this isn't true. If this is defined, disable irqs around the call in finish_task_switch(). Signed-off-by: Jamie Iles Acked-by: Peter Zijlstra Cc: Russell King - ARM Linux LKML-Reference: <1262964453-27370-1-git-send-email-jamie.iles@picochip.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/sched.c b/kernel/sched.c index e507af086b42..c3ad3427a2a5 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2783,7 +2783,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) */ prev_state = prev->state; finish_arch_switch(prev); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_disable(); +#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ perf_event_task_sched_in(current); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_enable(); +#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); -- cgit v1.2.3-58-ga151 From ff314d3903c2843de65c2148f66f277f2440ed26 Mon Sep 17 00:00:00 2001 From: Wenji Huang Date: Wed, 13 Jan 2010 17:01:38 +0800 Subject: perf: Make cmd_to_page() function more compact Remove branch for is_perf_command. Signed-off-by: Wenji Huang Cc: fweisbec@gmail.com Cc: jkacur@redhat.com Cc: acme@redhat.com LKML-Reference: <1263373298-13282-1-git-send-email-wenji.huang@oracle.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-help.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index e427d6965e0c..215b584007b1 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -313,8 +313,6 @@ static const char *cmd_to_page(const char *perf_cmd) return "perf"; else if (!prefixcmp(perf_cmd, "perf")) return perf_cmd; - else if (is_perf_command(perf_cmd)) - return prepend("perf-", perf_cmd); else return prepend("perf-", perf_cmd); } -- cgit v1.2.3-58-ga151 From b7cece76783c68fb391f9882235b4b0c9c300c46 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 13 Jan 2010 13:22:17 -0200 Subject: perf tools: Encode kernel module mappings in perf.data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We were always looking at the running machine /proc/modules, even when processing a perf.data file, which only makes sense when we're doing 'perf record' and 'perf report' on the same machine, and in close sucession, or if we don't use modules at all, right Peter? ;-) Now, at 'perf record' time we read /proc/modules, find the long path for modules, and put them as PERF_MMAP events, just like we did to encode the reloc reference symbol for vmlinux. Talking about that now it is encoded in .pgoff, so that we can use .{start,len} to store the address boundaries for the kernel so that when we reconstruct the kmaps tree we can do lookups right away, without having to fixup the end of the kernel maps like we did in the past (and now only in perf record). 
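The record-time side needs two things from each /proc/modules line: the module name (first field) and its load address (last field). A minimal standalone parsing sketch under that assumption, with error handling much simplified compared to perf_session__create_module_maps():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative only.  A /proc/modules line looks roughly like:
 *	e1000e 135168 1 - Live 0xffffffffa0040000
 * so the name is the first field and the load address the last one.
 */
static int parse_modules_line(const char *line, char name[64],
			      unsigned long long *start)
{
	const char *sep = strrchr(line, ' ');

	if (sep == NULL || sscanf(line, "%63s", name) != 1)
		return -1;

	*start = strtoull(sep + 1, NULL, 16);
	return 0;
}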
One more step in the 'perf archive' direction when we'll finally be able to collect data in one machine and analyse in another. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263396139-4798-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 5 ++ tools/perf/builtin-record.c | 11 +++++ tools/perf/builtin-top.c | 5 ++ tools/perf/util/event.c | 106 +++++++++++++++++++++++++++++++++++----- tools/perf/util/event.h | 2 + tools/perf/util/session.c | 8 +-- tools/perf/util/symbol.c | 116 ++++++++++++++++++++++++++++++++------------ tools/perf/util/symbol.h | 3 ++ tools/perf/util/thread.h | 4 ++ 9 files changed, 212 insertions(+), 48 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 88c570c18e3e..4af7199c5af7 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -494,6 +494,11 @@ static int __cmd_kmem(void) if (!perf_session__has_traces(session, "kmem record")) goto out_delete; + if (perf_session__create_kernel_maps(session) < 0) { + pr_err("Problems creating kernel maps\n"); + return -1; + } + setup_pager(); err = perf_session__process_events(session, &event_ops); if (err != 0) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 8f88420e066b..c130df2676f1 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -465,6 +465,11 @@ static int __cmd_record(int argc, const char **argv) return -1; } + if (perf_session__create_kernel_maps(session) < 0) { + pr_err("Problems creating kernel maps\n"); + return -1; + } + if (!file_new) { err = perf_header__read(&session->header, output); if (err < 0) @@ -558,6 +563,12 @@ static int __cmd_record(int argc, const char **argv) return err; } + err = event__synthesize_modules(process_synthesized_event, session); + if (err < 0) { + pr_err("Couldn't record kernel reference relocation symbol.\n"); + return err; + } + if (!system_wide && profile_cpu == -1) event__synthesize_thread(pid, process_synthesized_event, session); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ddc584b64871..6822b44ca4f9 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1165,6 +1165,11 @@ static int __cmd_top(void) if (session == NULL) return -ENOMEM; + if (perf_session__create_kernel_maps(session) < 0) { + pr_err("Problems creating kernel maps\n"); + return -1; + } + if (target_pid != -1) event__synthesize_thread(target_pid, event__process, session); else diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index bfb3d872b9f5..4f3e7ef33b83 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -154,6 +154,36 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, return 0; } +int event__synthesize_modules(event__handler_t process, + struct perf_session *session) +{ + struct rb_node *nd; + + for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]); + nd; nd = rb_next(nd)) { + event_t ev; + size_t size; + struct map *pos = rb_entry(nd, struct map, rb_node); + + if (pos->dso->kernel) + continue; + + size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); + memset(&ev, 0, sizeof(ev)); + ev.mmap.header.type = PERF_RECORD_MMAP; + ev.mmap.header.size = (sizeof(ev.mmap) - + (sizeof(ev.mmap.filename) - size)); + ev.mmap.start = pos->start; + ev.mmap.len = pos->end - pos->start; + + memcpy(ev.mmap.filename, pos->dso->long_name, + pos->dso->long_name_len + 1); + process(&ev, session); + } + 
+ return 0; +} + int event__synthesize_thread(pid_t pid, event__handler_t process, struct perf_session *session) { @@ -222,7 +252,9 @@ int event__synthesize_kernel_mmap(event__handler_t process, "[kernel.kallsyms.%s]", symbol_name) + 1; size = ALIGN(size, sizeof(u64)); ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); - ev.mmap.start = args.start; + ev.mmap.pgoff = args.start; + ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start; + ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start ; return process(&ev, session); } @@ -280,7 +312,6 @@ int event__process_mmap(event_t *self, struct perf_session *session) { struct thread *thread; struct map *map; - static const char kmmap_prefix[] = "[kernel.kallsyms."; dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", self->mmap.pid, self->mmap.tid, @@ -289,13 +320,61 @@ int event__process_mmap(event_t *self, struct perf_session *session) (void *)(long)self->mmap.pgoff, self->mmap.filename); - if (self->mmap.pid == 0 && - memcmp(self->mmap.filename, kmmap_prefix, - sizeof(kmmap_prefix) - 1) == 0) { - const char *symbol_name = (self->mmap.filename + - sizeof(kmmap_prefix) - 1); - perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name, - self->mmap.start); + if (self->mmap.pid == 0) { + static const char kmmap_prefix[] = "[kernel.kallsyms."; + + if (self->mmap.filename[0] == '/') { + char short_module_name[1024]; + char *name = strrchr(self->mmap.filename, '/'), *dot; + + if (name == NULL) + goto out_problem; + + ++name; /* skip / */ + dot = strrchr(name, '.'); + if (dot == NULL) + goto out_problem; + + snprintf(short_module_name, sizeof(short_module_name), + "[%.*s]", (int)(dot - name), name); + strxfrchar(short_module_name, '-', '_'); + + map = perf_session__new_module_map(session, + self->mmap.start, + short_module_name); + if (map == NULL) + goto out_problem; + + name = strdup(self->mmap.filename); + if (name == NULL) + goto out_problem; + + dso__set_long_name(map->dso, name); + map->end = map->start + self->mmap.len; + } else if (memcmp(self->mmap.filename, kmmap_prefix, + sizeof(kmmap_prefix) - 1) == 0) { + const char *symbol_name = (self->mmap.filename + + sizeof(kmmap_prefix) - 1); + /* + * Should be there already, from the build-id table in + * the header. 
+ */ + struct dso *kernel = __dsos__findnew(&dsos__kernel, + "[kernel.kallsyms]"); + if (kernel == NULL) + goto out_problem; + + if (__map_groups__create_kernel_maps(&session->kmaps, + session->vmlinux_maps, + kernel) < 0) + goto out_problem; + + session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start; + session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; + + perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name, + self->mmap.pgoff); + } return 0; } @@ -304,10 +383,13 @@ int event__process_mmap(event_t *self, struct perf_session *session) session->cwd, session->cwdlen); if (thread == NULL || map == NULL) - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - else - thread__insert_map(thread, map); + goto out_problem; + + thread__insert_map(thread, map); + return 0; +out_problem: + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); return 0; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 80356da8216c..50a7132887f5 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -112,6 +112,8 @@ void event__synthesize_threads(event__handler_t process, int event__synthesize_kernel_mmap(event__handler_t process, struct perf_session *session, const char *symbol_name); +int event__synthesize_modules(event__handler_t process, + struct perf_session *session); int event__process_comm(event_t *self, struct perf_session *session); int event__process_lost(event_t *self, struct perf_session *session); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 378ac5422bcf..fd1c5a39a5bb 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -69,9 +69,6 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc self->unknown_events = 0; map_groups__init(&self->kmaps); - if (perf_session__create_kernel_maps(self) < 0) - goto out_delete; - if (mode == O_RDONLY && perf_session__open(self, force) < 0) goto out_delete; @@ -268,8 +265,11 @@ int perf_header__read_build_ids(int input, u64 offset, u64 size) head = &dsos__kernel; dso = __dsos__findnew(head, filename); - if (dso != NULL) + if (dso != NULL) { dso__set_build_id(dso, &bev.build_id); + if (head == &dsos__kernel && filename[0] == '[') + dso->kernel = 1; + } offset += bev.header.size; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8e6627e6b778..381999dd5c1f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -161,7 +161,7 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) self->start, self->end, self->name); } -static void dso__set_long_name(struct dso *self, char *name) +void dso__set_long_name(struct dso *self, char *name) { if (name == NULL) return; @@ -176,7 +176,7 @@ static void dso__set_basename(struct dso *self) struct dso *dso__new(const char *name) { - struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); + struct dso *self = zalloc(sizeof(*self) + strlen(name) + 1); if (self != NULL) { int i; @@ -500,13 +500,17 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, *module++ = '\0'; - if (strcmp(self->name, module)) { + if (strcmp(curr_map->dso->short_name, module)) { curr_map = map_groups__find_by_name(&session->kmaps, map->type, module); if (curr_map == NULL) { pr_debug("/proc/{kallsyms,modules} " - "inconsistency!\n"); + "inconsistency while looking " + "for \"%s\" module!\n", module); return -1; } + + if (curr_map->dso->loaded) + goto discard_symbol; } /* * So that we look just like we get from 
.ko files, @@ -1343,13 +1347,33 @@ struct map *map_groups__find_by_name(struct map_groups *self, for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); - if (map->dso && strcmp(map->dso->name, name) == 0) + if (map->dso && strcmp(map->dso->short_name, name) == 0) return map; } return NULL; } +static int dso__kernel_module_get_build_id(struct dso *self) +{ + char filename[PATH_MAX]; + /* + * kernel module short names are of the form "[module]" and + * we need just "module" here. + */ + const char *name = self->short_name + 1; + + snprintf(filename, sizeof(filename), + "/sys/module/%.*s/notes/.note.gnu.build-id", + (int)strlen(name - 1), name); + + if (sysfs__read_build_id(filename, self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; + + return 0; +} + static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname) { struct dirent *dent; @@ -1395,6 +1419,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d if (long_name == NULL) goto failure; dso__set_long_name(map->dso, long_name); + dso__kernel_module_get_build_id(map->dso); } } @@ -1437,6 +1462,24 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return self; } +struct map *perf_session__new_module_map(struct perf_session *self, u64 start, + const char *filename) +{ + struct map *map; + struct dso *dso = __dsos__findnew(&dsos__kernel, filename); + + if (dso == NULL) + return NULL; + + map = map__new2(start, dso, MAP__FUNCTION); + if (map == NULL) + return NULL; + + dso->origin = DSO__ORIG_KMODULE; + map_groups__insert(&self->kmaps, map); + return map; +} + static int perf_session__create_module_maps(struct perf_session *self) { char *line = NULL; @@ -1450,7 +1493,6 @@ static int perf_session__create_module_maps(struct perf_session *self) while (!feof(file)) { char name[PATH_MAX]; u64 start; - struct dso *dso; char *sep; int line_len; @@ -1476,26 +1518,10 @@ static int perf_session__create_module_maps(struct perf_session *self) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - dso = dso__new(name); - - if (dso == NULL) - goto out_delete_line; - - map = map__new2(start, dso, MAP__FUNCTION); - if (map == NULL) { - dso__delete(dso); + map = perf_session__new_module_map(self, start, name); + if (map == NULL) goto out_delete_line; - } - - snprintf(name, sizeof(name), - "/sys/module/%s/notes/.note.gnu.build-id", line); - if (sysfs__read_build_id(name, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; - - dso->origin = DSO__ORIG_KMODULE; - map_groups__insert(&self->kmaps, map); - dsos__add(&dsos__kernel, dso); + dso__kernel_module_get_build_id(map->dso); } free(line); @@ -1573,10 +1599,28 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } } + /* + * Say the kernel DSO was created when processing the build-id header table, + * we have a build-id, so check if it is the same as the running kernel, + * using it if it is. 
+ */ + if (self->has_build_id) { + u8 kallsyms_build_id[BUILD_ID_SIZE]; + + if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, + sizeof(kallsyms_build_id)) == 0) + + is_kallsyms = dso__build_id_equal(self, kallsyms_build_id); + if (is_kallsyms) + goto do_kallsyms; + goto do_vmlinux; + } + is_kallsyms = self->long_name[0] == '['; if (is_kallsyms) goto do_kallsyms; +do_vmlinux: err = dso__load_vmlinux(self, map, session, self->long_name, filter); if (err <= 0) { pr_info("The file %s cannot be used, " @@ -1694,16 +1738,12 @@ out_delete_kernel_dso: return NULL; } -static int map_groups__create_kernel_maps(struct map_groups *self, - struct map *vmlinux_maps[MAP__NR_TYPES], - const char *vmlinux) +int __map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES], + struct dso *kernel) { - struct dso *kernel = dsos__create_kernel(vmlinux); enum map_type type; - if (kernel == NULL) - return -1; - for (type = 0; type < MAP__NR_TYPES; ++type) { vmlinux_maps[type] = map__new2(0, kernel, type); if (vmlinux_maps[type] == NULL) @@ -1717,6 +1757,18 @@ static int map_groups__create_kernel_maps(struct map_groups *self, return 0; } +static int map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES], + const char *vmlinux) +{ + struct dso *kernel = dsos__create_kernel(vmlinux); + + if (kernel == NULL) + return -1; + + return __map_groups__create_kernel_maps(self, vmlinux_maps, kernel); +} + static void vmlinux_path__exit(void) { while (--vmlinux_path__nr_entries >= 0) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ee0b4593db7b..594156e43b10 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -134,6 +134,7 @@ size_t dsos__fprintf_buildid(FILE *fp); size_t dso__fprintf_buildid(struct dso *self, FILE *fp); size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); char dso__symtab_origin(const struct dso *self); +void dso__set_long_name(struct dso *self, char *name); void dso__set_build_id(struct dso *self, void *build_id); struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, @@ -151,5 +152,7 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type); int perf_session__create_kernel_maps(struct perf_session *self); +struct map *perf_session__new_module_map(struct perf_session *self, u64 start, + const char *filename); extern struct dso *vdso; #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index c206f72c8881..c06c13535a70 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -67,4 +67,8 @@ map_groups__find_function(struct map_groups *self, struct perf_session *session, struct map *map_groups__find_by_name(struct map_groups *self, enum map_type type, const char *name); + +int __map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES], + struct dso *kernel); #endif /* __PERF_THREAD_H */ -- cgit v1.2.3-58-ga151 From 0895cf0a823e03ea6d79736611e90186006c805e Mon Sep 17 00:00:00 2001 From: Kirill Smelkov Date: Wed, 13 Jan 2010 13:22:18 -0200 Subject: perf: Fix few typos + cosmetics Signed-off-by: Kirill Smelkov Signed-off-by: Arnaldo Carvalho de Melo LKML-Reference: <1263396139-4798-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf.txt | 2 +- tools/perf/design.txt | 8 ++++---- 2 files changed, 5 
insertions(+), 5 deletions(-) diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt index 69c832557199..0eeb247dc7d2 100644 --- a/tools/perf/Documentation/perf.txt +++ b/tools/perf/Documentation/perf.txt @@ -12,7 +12,7 @@ SYNOPSIS DESCRIPTION ----------- -Performance counters for Linux are are a new kernel-based subsystem +Performance counters for Linux are a new kernel-based subsystem that provide a framework for all things performance analysis. It covers hardware level (CPU/PMU, Performance Monitoring Unit) features and software features (software counters, tracepoints) as well. diff --git a/tools/perf/design.txt b/tools/perf/design.txt index 8d0de5130db3..bd0bb1b1279b 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -101,10 +101,10 @@ enum hw_event_ids { */ PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, - PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, - PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, }; @@ -131,8 +131,8 @@ software events, selected by 'event_id': */ enum sw_event_ids { PERF_COUNT_SW_CPU_CLOCK = 0, - PERF_COUNT_SW_TASK_CLOCK = 1, - PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, -- cgit v1.2.3-58-ga151 From 66aeb6d5cb701aedd508187e08612bfd1e108e2e Mon Sep 17 00:00:00 2001 From: Kirill Smelkov Date: Wed, 13 Jan 2010 13:22:19 -0200 Subject: perf top: Fix code typo in prompt_symbol() sym_filter is what was (if ever) passed with -s option. What was typed by user, and what we were looking for, is in buf. Signed-off-by: Kirill Smelkov Signed-off-by: Arnaldo Carvalho de Melo LKML-Reference: <1263396139-4798-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6822b44ca4f9..7a8a77ec2c9d 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -667,7 +667,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg) } if (!found) { - fprintf(stderr, "Sorry, %s is not active.\n", sym_filter); + fprintf(stderr, "Sorry, %s is not active.\n", buf); sleep(1); return; } else -- cgit v1.2.3-58-ga151 From 0d755034dbd01e240eadf2d31f4f75d3088ccd21 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 12:23:09 -0200 Subject: perf tools: Don't cast RIP to pointers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since they can come from another architecture with bigger pointers, i.e. processing a 64-bit perf.data on a 32-bit arch. 
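The truncation is easy to demonstrate in isolation. A standalone sketch (the address is just an example 64-bit kernel text address; perf's u64 is unsigned long long in the tools headers, so the "%#Lx" conversion the patch switches to is accepted by glibc, and PRIx64 below is the ISO C spelling of the same thing):

#include <inttypes.h>
#include <stdio.h>

/*
 * Illustrative only: on a 32-bit host the old (void *)(long) cast drops
 * the high bits of a 64-bit instruction pointer read from a 64-bit
 * perf.data file; printing it as a 64-bit integer does not.
 */
int main(void)
{
	uint64_t ip = 0xffffffff81002b5aULL;	/* example kernel text address */

	printf("as pointer: %p\n", (void *)(long)ip);	/* truncated on 32-bit */
	printf("as u64    : %#" PRIx64 "\n", ip);
	return 0;
}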
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263478990-8200-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-diff.c | 4 ++-- tools/perf/builtin-kmem.c | 7 ++----- tools/perf/builtin-report.c | 7 ++----- tools/perf/builtin-sched.c | 7 ++----- tools/perf/builtin-trace.c | 7 ++----- tools/perf/util/event.c | 9 +++------ tools/perf/util/session.c | 16 ++++++---------- 8 files changed, 21 insertions(+), 40 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 117bbae844bf..73c202ee0882 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -132,8 +132,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) { struct addr_location al; - dump_printf("(IP, %d): %d: %p\n", event->header.misc, - event->ip.pid, (void *)(long)event->ip.ip); + dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc, + event->ip.pid, event->ip.ip); if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 924bfb77a6ab..18b3f505f9db 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -42,8 +42,8 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi struct addr_location al; struct sample_data data = { .period = 1, }; - dump_printf("(IP, %d): %d: %p\n", event->header.misc, - event->ip.pid, (void *)(long)event->ip.ip); + dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc, + event->ip.pid, event->ip.ip); if (event__preprocess_sample(event, session, &al, NULL) < 0) { pr_warning("problem processing %d event, skipping it.\n", diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 4af7199c5af7..7323d9dfbce8 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -316,11 +316,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) event__parse_sample(event, session->sample_type, &data); - dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", - event->header.misc, - data.pid, data.tid, - (void *)(long)data.ip, - (long long)data.period); + dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, + data.pid, data.tid, data.ip, data.period); thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 80d691a4191f..4c3d6997995b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -93,11 +93,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) event__parse_sample(event, session->sample_type, &data); - dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", - event->header.misc, - data.pid, data.tid, - (void *)(long)data.ip, - (long long)data.period); + dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, + data.pid, data.tid, data.ip, data.period); if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { unsigned int i; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 702322f8fec1..4f5a03e43444 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1621,11 +1621,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) event__parse_sample(event, session->sample_type, &data); - 
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", - event->header.misc, - data.pid, data.tid, - (void *)(long)data.ip, - (long long)data.period); + dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, + data.pid, data.tid, data.ip, data.period); thread = perf_session__findnew(session, data.pid); if (thread == NULL) { diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 1831434aa938..8e9cbfe608d6 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -75,11 +75,8 @@ static int process_sample_event(event_t *event, struct perf_session *session) event__parse_sample(event, session->sample_type, &data); - dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", - event->header.misc, - data.pid, data.tid, - (void *)(long)data.ip, - (long long)data.period); + dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc, + data.pid, data.tid, data.ip, data.period); thread = perf_session__findnew(session, event->ip.pid); if (thread == NULL) { diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 4f3e7ef33b83..24ec5be4a1c0 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -313,12 +313,9 @@ int event__process_mmap(event_t *self, struct perf_session *session) struct thread *thread; struct map *map; - dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", - self->mmap.pid, self->mmap.tid, - (void *)(long)self->mmap.start, - (void *)(long)self->mmap.len, - (void *)(long)self->mmap.pgoff, - self->mmap.filename); + dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", + self->mmap.pid, self->mmap.tid, self->mmap.start, + self->mmap.len, self->mmap.pgoff, self->mmap.filename); if (self->mmap.pid == 0) { static const char kmmap_prefix[] = "[kernel.kallsyms."; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index fd1c5a39a5bb..e3ccdb46d6c4 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -209,9 +209,8 @@ static int perf_session__process_event(struct perf_session *self, trace_event(event); if (event->header.type < PERF_RECORD_MAX) { - dump_printf("%p [%p]: PERF_RECORD_%s", - (void *)(offset + head), - (void *)(long)(event->header.size), + dump_printf("%#lx [%#x]: PERF_RECORD_%s", + offset + head, event->header.size, event__name[event->header.type]); ++event__total[0]; ++event__total[event->header.type]; @@ -362,16 +361,13 @@ more: size = event->header.size; - dump_printf("\n%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); + dump_printf("\n%#lx [%#x]: event: %d\n", + offset + head, event->header.size, event->header.type); if (size == 0 || perf_session__process_event(self, event, ops, offset, head) < 0) { - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), + dump_printf("%#lx [%#x]: skipping unknown header type: %d\n", + offset + head, event->header.size, event->header.type); /* * assume we lost track of the stream, check alignment, and -- cgit v1.2.3-58-ga151 From ba21594cddee0a3af582971656702b1c4509d8f5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 12:23:10 -0200 Subject: perf tools: Cross platform perf.data analysis support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are still some problems related to loading vmlinux files, but those are unrelated to the feature implemented in this patch, so will get fixed in the next patches, but here are some results: 1. 
collect perf.data file on a Fedora 12 machine, x86_64, 64-bit userland 2. transfer it to a Debian Testing machine, PARISC64, 32-bit userland acme@parisc:~/git/linux-2.6-tip$ perf buildid-list | head -5 74f9930ee94475b6b3238caf3725a50d59cb994b [kernel.kallsyms] 55fdd56670453ea66c011158c4b9d30179c1d049 /lib/modules/2.6.33-rc4-tip+/kernel/net/ipv4/netfilter/ipt_MASQUERADE.ko 41adff63c730890480980d5d8ba513f1c216a858 /lib/modules/2.6.33-rc4-tip+/kernel/net/ipv4/netfilter/iptable_nat.ko 90a33def1077bb8e97b8a78546dc96c2de62df46 /lib/modules/2.6.33-rc4-tip+/kernel/net/ipv4/netfilter/nf_nat.ko 984c7bea90ce1376d5c8e7ef43a781801286e62d /lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/tun.ko acme@parisc:~/git/linux-2.6-tip$ perf buildid-list | tail -5 22492f3753c6a67de5c7ccbd6b863390c92c0723 /usr/lib64/libXt.so.6.0.0 353802bb7e1b895ba43507cc678f951e778e4c6f /usr/lib64/libMagickCore.so.2.0.0 d10c2897558595efe7be8b0584cf7e6398bc776c /usr/lib64/libfprint.so.0.0.0 a83ecfb519a788774a84d5ddde633c9ba56c03ab /home/acme/bin/perf d3ca765a8ecf257d263801d7ad8c49c189082317 /usr/lib64/libdwarf.so.0.0 acme@parisc:~/git/linux-2.6-tip$ acme@parisc:~/git/linux-2.6-tip$ perf report --sort comm The file [kernel.kallsyms] cannot be used, trying to use /proc/kallsyms... ^^^^ The problem related to vmlinux handling, it shouldn't be trying this ^^^^ rather alien /proc/kallsyms at all... /lib64/libpthread-2.10.2.so with build id 5c68f7afeb33309c78037e374b0deee84dd441f6 not found, continuing without symbols /lib64/libc-2.10.2.so with build id eb4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 not found, continuing without symbols /home/acme/bin/perf with build id a83ecfb519a788774a84d5ddde633c9ba56c03ab not found, continuing without symbols /usr/sbin/openvpn with build id f2037a091ef36b591187a858d75e203690ea9409 not found, continuing without symbols Failed to open /lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/e1000e/e1000e.ko, continuing without symbols Failed to open /lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/wireless/iwlwifi/iwlcore.ko, continuing without symbols # Samples: 293085637 # # Overhead Command # ........ ............... # 61.70% find 23.50% perf 5.86% swapper 3.12% sshd 2.39% init 0.87% bash 0.86% sleep 0.59% dbus-daemon 0.25% hald 0.24% NetworkManager 0.19% hald-addon-rfki 0.15% openvpn 0.07% phy0 0.07% events/0 0.05% iwl3945 0.05% events/1 0.03% kondemand/0 acme@parisc:~/git/linux-2.6-tip$ Which matches what we get when running the same command for the same perf.data file on the F12, x86_64, source machine: [root@doppio linux-2.6-tip]# perf report --sort comm # Samples: 293085637 # # Overhead Command # ........ ............... # 61.70% find 23.50% perf 5.86% swapper 3.12% sshd 2.39% init 0.87% bash 0.86% sleep 0.59% dbus-daemon 0.25% hald 0.24% NetworkManager 0.19% hald-addon-rfki 0.15% openvpn 0.07% phy0 0.07% events/0 0.05% iwl3945 0.05% events/1 0.03% kondemand/0 [root@doppio linux-2.6-tip]# The other modes work as well, modulo the problem with vmlinux: acme@parisc:~/git/linux-2.6-tip$ perf report --sort comm,dso 2> /dev/null | head -15 # Samples: 293085637 # # Overhead Command Shared Object # ........ ............... ................................. 
# 35.11% find ffffffff81002b5a 18.25% perf ffffffff8102235f 16.17% find libc-2.10.2.so 9.07% find find 5.80% swapper ffffffff8102235f 3.95% perf libc-2.10.2.so 2.33% init ffffffff810091b9 1.65% sshd libcrypto.so.0.9.8k 1.35% find [e1000e] 0.68% sleep libc-2.10.2.so acme@parisc:~/git/linux-2.6-tip$ And the lack of the right buildids: acme@parisc:~/git/linux-2.6-tip$ perf report --sort comm,dso,symbol 2> /dev/null | head -15 # Samples: 293085637 # # Overhead Command Shared Object Symbol # ........ ............... ................................. ...... # 35.11% find ffffffff81002b5a [k] 0xffffffff81002b5a 18.25% perf ffffffff8102235f [k] 0xffffffff8102235f 16.17% find libc-2.10.2.so [.] 0x00000000045782 9.07% find find [.] 0x0000000000fb0e 5.80% swapper ffffffff8102235f [k] 0xffffffff8102235f 3.95% perf libc-2.10.2.so [.] 0x0000000007f398 2.33% init ffffffff810091b9 [k] 0xffffffff810091b9 1.65% sshd libcrypto.so.0.9.8k [.] 0x00000000105440 1.35% find [e1000e] [k] 0x00000000010948 0.68% sleep libc-2.10.2.so [.] 0x0000000011ad5b acme@parisc:~/git/linux-2.6-tip$ But if we: acme@parisc:~/git/linux-2.6-tip$ ls ~/.debug ls: cannot access /home/acme/.debug: No such file or directory acme@parisc:~/git/linux-2.6-tip$ mkdir -p ~/.debug/lib64/libc-2.10.2.so/ acme@parisc:~/git/linux-2.6-tip$ scp doppio:.debug/lib64/libc-2.10.2.so/* ~/.debug/lib64/libc-2.10.2.so/ acme@doppio's password: eb4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 100% 1783KB 714.7KB/s 00:02 acme@parisc:~/git/linux-2.6-tip$ mkdir -p ~/.debug/.build-id/eb acme@parisc:~/git/linux-2.6-tip$ ln -s ../../lib64/libc-2.10.2.so/eb4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 ~/.debug/.build-id/eb/4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 acme@parisc:~/git/linux-2.6-tip$ perf report --dsos libc-2.10.2.so 2> /dev/null # dso: libc-2.10.2.so # Samples: 64281170 # # Overhead Command Symbol # ........ ............... ...... # 14.98% perf [.] __GI_strcmp 12.30% find [.] __GI_memmove 9.25% find [.] _int_malloc 7.60% find [.] _IO_vfprintf_internal 6.10% find [.] _IO_new_file_xsputn 6.02% find [.] __GI_close 3.08% find [.] _IO_file_overflow_internal 3.08% find [.] malloc_consolidate 3.08% find [.] _int_free 3.08% find [.] __strchrnul 3.08% find [.] __getdents64 3.08% find [.] __write_nocancel 3.08% sleep [.] __GI__dl_addr 3.08% sshd [.] __libc_select 3.08% find [.] _IO_new_file_write 3.07% find [.] _IO_new_do_write 3.06% find [.] __GI___errno_location 3.05% find [.] __GI___libc_malloc 3.04% perf [.] __GI_memcpy 1.71% find [.] __fprintf_chk 1.29% bash [.] __gconv_transform_utf8_internal 0.79% dbus-daemon [.] __GI_strlen # # (For a higher level overview, try: perf report --sort comm,dso) # acme@parisc:~/git/linux-2.6-tip$ Which matches what we get on the source, F12, x86_64 machine: [root@doppio linux-2.6-tip]# perf report --dsos libc-2.10.2.so # dso: libc-2.10.2.so # Samples: 64281170 # # Overhead Command Symbol # ........ ............... ...... # 14.98% perf [.] __GI_strcmp 12.30% find [.] __GI_memmove 9.25% find [.] _int_malloc 7.60% find [.] _IO_vfprintf_internal 6.10% find [.] _IO_new_file_xsputn 6.02% find [.] __GI_close 3.08% find [.] _IO_file_overflow_internal 3.08% find [.] malloc_consolidate 3.08% find [.] _int_free 3.08% find [.] __strchrnul 3.08% find [.] __getdents64 3.08% find [.] __write_nocancel 3.08% sleep [.] __GI__dl_addr 3.08% sshd [.] __libc_select 3.08% find [.] _IO_new_file_write 3.07% find [.] _IO_new_do_write 3.06% find [.] __GI___errno_location 3.05% find [.] __GI___libc_malloc 3.04% perf [.] __GI_memcpy 1.71% find [.] 
__fprintf_chk 1.29% bash [.] __gconv_transform_utf8_internal 0.79% dbus-daemon [.] __GI_strlen # # (For a higher level overview, try: perf report --sort comm,dso) # [root@doppio linux-2.6-tip]# So I think this is really, really nice in that it demonstrates the portability of perf.data files and the use of build-ids accross such aliens worlds :-) There are some things to fix tho, like the bitmap on the header, but things are looking good. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263478990-8200-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 63 +++++++++++++++++++++------ tools/perf/util/header.h | 2 + tools/perf/util/session.c | 108 +++++++++++++++++++++++++++++++++++++++++----- tools/perf/util/session.h | 7 ++- 4 files changed, 157 insertions(+), 23 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ec96321eb9e4..b31e0ae4b8db 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1,8 +1,10 @@ #include +#include #include #include #include #include +#include #include "util.h" #include "header.h" @@ -464,8 +466,21 @@ static int do_read(int fd, void *buf, size_t size) return 0; } +static int perf_header__getbuffer64(struct perf_header *self, + int fd, void *buf, size_t size) +{ + if (do_read(fd, buf, size)) + return -1; + + if (self->needs_swap) + mem_bswap_64(buf, size); + + return 0; +} + int perf_header__process_sections(struct perf_header *self, int fd, int (*process)(struct perf_file_section *self, + struct perf_header *ph, int feat, int fd)) { struct perf_file_section *feat_sec; @@ -486,7 +501,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, lseek(fd, self->data_offset + self->data_size, SEEK_SET); - if (do_read(fd, feat_sec, sec_size)) + if (perf_header__getbuffer64(self, fd, feat_sec, sec_size)) goto out_free; err = 0; @@ -494,7 +509,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, if (perf_header__has_feat(self, feat)) { struct perf_file_section *sec = &feat_sec[idx++]; - err = process(sec, feat, fd); + err = process(sec, self, feat, fd); if (err < 0) break; } @@ -511,10 +526,20 @@ int perf_file_header__read(struct perf_file_header *self, lseek(fd, 0, SEEK_SET); if (do_read(fd, self, sizeof(*self)) || - self->magic != PERF_MAGIC || - self->attr_size != sizeof(struct perf_file_attr)) + memcmp(&self->magic, __perf_magic, sizeof(self->magic))) return -1; + if (self->attr_size != sizeof(struct perf_file_attr)) { + u64 attr_size = bswap_64(self->attr_size); + + if (attr_size != sizeof(struct perf_file_attr)) + return -1; + + mem_bswap_64(self, offsetof(struct perf_file_header, + adds_features)); + ph->needs_swap = true; + } + if (self->size != sizeof(*self)) { /* Support the previous format */ if (self->size == offsetof(typeof(*self), adds_features)) @@ -524,16 +549,28 @@ int perf_file_header__read(struct perf_file_header *self, } memcpy(&ph->adds_features, &self->adds_features, - sizeof(self->adds_features)); + sizeof(ph->adds_features)); + /* + * FIXME: hack that assumes that if we need swap the perf.data file + * may be coming from an arch with a different word-size, ergo different + * DEFINE_BITMAP format, investigate more later, but for now its mostly + * safe to assume that we have a build-id section. Trace files probably + * have several other issues in this realm anyway... 
+ */ + if (ph->needs_swap) { + memset(&ph->adds_features, 0, sizeof(ph->adds_features)); + perf_header__set_feat(ph, HEADER_BUILD_ID); + } ph->event_offset = self->event_types.offset; - ph->event_size = self->event_types.size; - ph->data_offset = self->data.offset; + ph->event_size = self->event_types.size; + ph->data_offset = self->data.offset; ph->data_size = self->data.size; return 0; } static int perf_file_section__process(struct perf_file_section *self, + struct perf_header *ph, int feat, int fd) { if (lseek(fd, self->offset, SEEK_SET) < 0) { @@ -548,7 +585,7 @@ static int perf_file_section__process(struct perf_file_section *self, break; case HEADER_BUILD_ID: - if (perf_header__read_build_ids(fd, self->offset, self->size)) + if (perf_header__read_build_ids(ph, fd, self->offset, self->size)) pr_debug("Failed to read buildids, continuing...\n"); break; default: @@ -560,7 +597,7 @@ static int perf_file_section__process(struct perf_file_section *self, int perf_header__read(struct perf_header *self, int fd) { - struct perf_file_header f_header; + struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; int nr_attrs, nr_ids, i, j; @@ -577,8 +614,9 @@ int perf_header__read(struct perf_header *self, int fd) struct perf_header_attr *attr; off_t tmp; - if (do_read(fd, &f_attr, sizeof(f_attr))) + if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr))) goto out_errno; + tmp = lseek(fd, 0, SEEK_CUR); attr = perf_header_attr__new(&f_attr.attr); @@ -589,7 +627,7 @@ int perf_header__read(struct perf_header *self, int fd) lseek(fd, f_attr.ids.offset, SEEK_SET); for (j = 0; j < nr_ids; j++) { - if (do_read(fd, &f_id, sizeof(f_id))) + if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id))) goto out_errno; if (perf_header_attr__add_id(attr, f_id) < 0) { @@ -610,7 +648,8 @@ int perf_header__read(struct perf_header *self, int fd) events = malloc(f_header.event_types.size); if (events == NULL) return -ENOMEM; - if (do_read(fd, events, f_header.event_types.size)) + if (perf_header__getbuffer64(self, fd, events, + f_header.event_types.size)) goto out_errno; event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 2b69aab67e35..ccc8540feccd 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -52,6 +52,7 @@ struct perf_header { u64 data_size; u64 event_offset; u64 event_size; + bool needs_swap; DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; @@ -80,6 +81,7 @@ bool perf_header__has_feat(const struct perf_header *self, int feat); int perf_header__process_sections(struct perf_header *self, int fd, int (*process)(struct perf_file_section *self, + struct perf_header *ph, int feat, int fd)); #endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e3ccdb46d6c4..604e14f6a6f9 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1,5 +1,6 @@ #include +#include #include #include @@ -201,21 +202,88 @@ void event__print_totals(void) event__name[i], event__total[i]); } +void mem_bswap_64(void *src, int byte_size) +{ + u64 *m = src; + + while (byte_size > 0) { + *m = bswap_64(*m); + byte_size -= sizeof(u64); + ++m; + } +} + +static void event__all64_swap(event_t *self) +{ + struct perf_event_header *hdr = &self->header; + mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr)); +} + +static void event__comm_swap(event_t *self) +{ + self->comm.pid = bswap_32(self->comm.pid); + self->comm.tid = 
bswap_32(self->comm.tid); +} + +static void event__mmap_swap(event_t *self) +{ + self->mmap.pid = bswap_32(self->mmap.pid); + self->mmap.tid = bswap_32(self->mmap.tid); + self->mmap.start = bswap_64(self->mmap.start); + self->mmap.len = bswap_64(self->mmap.len); + self->mmap.pgoff = bswap_64(self->mmap.pgoff); +} + +static void event__task_swap(event_t *self) +{ + self->fork.pid = bswap_32(self->fork.pid); + self->fork.tid = bswap_32(self->fork.tid); + self->fork.ppid = bswap_32(self->fork.ppid); + self->fork.ptid = bswap_32(self->fork.ptid); + self->fork.time = bswap_64(self->fork.time); +} + +static void event__read_swap(event_t *self) +{ + self->read.pid = bswap_32(self->read.pid); + self->read.tid = bswap_32(self->read.tid); + self->read.value = bswap_64(self->read.value); + self->read.time_enabled = bswap_64(self->read.time_enabled); + self->read.time_running = bswap_64(self->read.time_running); + self->read.id = bswap_64(self->read.id); +} + +typedef void (*event__swap_op)(event_t *self); + +static event__swap_op event__swap_ops[] = { + [PERF_RECORD_MMAP] = event__mmap_swap, + [PERF_RECORD_COMM] = event__comm_swap, + [PERF_RECORD_FORK] = event__task_swap, + [PERF_RECORD_EXIT] = event__task_swap, + [PERF_RECORD_LOST] = event__all64_swap, + [PERF_RECORD_READ] = event__read_swap, + [PERF_RECORD_SAMPLE] = event__all64_swap, + [PERF_RECORD_MAX] = NULL, +}; + static int perf_session__process_event(struct perf_session *self, event_t *event, struct perf_event_ops *ops, - unsigned long offset, unsigned long head) + u64 offset, u64 head) { trace_event(event); if (event->header.type < PERF_RECORD_MAX) { - dump_printf("%#lx [%#x]: PERF_RECORD_%s", + dump_printf("%#Lx [%#x]: PERF_RECORD_%s", offset + head, event->header.size, event__name[event->header.type]); ++event__total[0]; ++event__total[event->header.type]; } + if (self->header.needs_swap && event__swap_ops[event->header.type]) + event__swap_ops[event->header.type](event); + switch (event->header.type) { case PERF_RECORD_SAMPLE: return ops->sample(event, self); @@ -241,7 +309,15 @@ static int perf_session__process_event(struct perf_session *self, } } -int perf_header__read_build_ids(int input, u64 offset, u64 size) +void perf_event_header__bswap(struct perf_event_header *self) +{ + self->type = bswap_32(self->type); + self->misc = bswap_16(self->misc); + self->size = bswap_16(self->size); +} + +int perf_header__read_build_ids(struct perf_header *self, + int input, u64 offset, u64 size) { struct build_id_event bev; char filename[PATH_MAX]; @@ -256,6 +332,9 @@ int perf_header__read_build_ids(int input, u64 offset, u64 size) if (read(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; + if (self->needs_swap) + perf_event_header__bswap(&bev.header); + len = bev.header.size - sizeof(bev); if (read(input, filename, len) != len) goto out; @@ -292,9 +371,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se int perf_session__process_events(struct perf_session *self, struct perf_event_ops *ops) { - int err; - unsigned long head, shift; - unsigned long offset = 0; + int err, mmap_prot, mmap_flags; + u64 head, shift; + u64 offset = 0; size_t page_size; event_t *event; uint32_t size; @@ -330,9 +409,16 @@ out_getcwd_err: offset += shift; head -= shift; + mmap_prot = PROT_READ; + mmap_flags = MAP_SHARED; + + if (self->header.needs_swap) { + mmap_prot |= PROT_WRITE; + mmap_flags = MAP_PRIVATE; + } remap: - buf = mmap(NULL, page_size * self->mmap_window, PROT_READ, - MAP_SHARED, self->fd, offset); + buf = mmap(NULL, 
page_size * self->mmap_window, mmap_prot, + mmap_flags, self->fd, offset); if (buf == MAP_FAILED) { pr_err("failed to mmap file\n"); err = -errno; @@ -342,6 +428,8 @@ remap: more: event = (event_t *)(buf + head); + if (self->header.needs_swap) + perf_event_header__bswap(&event->header); size = event->header.size; if (size == 0) size = 8; @@ -361,12 +449,12 @@ more: size = event->header.size; - dump_printf("\n%#lx [%#x]: event: %d\n", + dump_printf("\n%#Lx [%#x]: event: %d\n", offset + head, event->header.size, event->header.type); if (size == 0 || perf_session__process_event(self, event, ops, offset, head) < 0) { - dump_printf("%#lx [%#x]: skipping unknown header type: %d\n", + dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", offset + head, event->header.size, event->header.type); /* diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index d4a9d20f8d44..36d1a80c0b6c 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -51,6 +51,8 @@ struct perf_event_ops { struct perf_session *perf_session__new(const char *filename, int mode, bool force); void perf_session__delete(struct perf_session *self); +void perf_event_header__bswap(struct perf_event_header *self); + int perf_session__process_events(struct perf_session *self, struct perf_event_ops *event_ops); @@ -61,7 +63,8 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self, bool perf_session__has_traces(struct perf_session *self, const char *msg); -int perf_header__read_build_ids(int input, u64 offset, u64 file_size); +int perf_header__read_build_ids(struct perf_header *self, int input, + u64 offset, u64 file_size); int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, const char *symbol_name, @@ -69,4 +72,6 @@ int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, void perf_session__reloc_vmlinux_maps(struct perf_session *self, u64 unrelocated_addr); +void mem_bswap_64(void *src, int byte_size); + #endif /* __PERF_SESSION_H */ -- cgit v1.2.3-58-ga151 From 1b75962e92d48a41019d4b440e221638aa2a7238 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 18:30:04 -0200 Subject: perf tools: Convert getpagesize() uses to sysconf(_SC_GETPAGESIZE) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using the more portable and equivalent sysconf call. 
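For illustration only (not part of the patch), a minimal standalone sketch showing that on a system providing both interfaces the two calls report the same value, so the substitution is behaviour-preserving; the program below is an assumption of this note, not code from the series:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long posix_size = sysconf(_SC_PAGESIZE);	/* portable POSIX query */
	int legacy_size = getpagesize();		/* older BSD interface being replaced */

	printf("sysconf(_SC_PAGESIZE) = %ld, getpagesize() = %d\n",
	       posix_size, legacy_size);
	return posix_size == (long)legacy_size ? 0 : 1;
}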
Reported-by: Aristeu Rozanski Reported-by: Ulrich Drepper Signed-off-by: Arnaldo Carvalho de Melo Cc: Aristeu Rozanski Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ulrich Drepper LKML-Reference: <1263501006-14185-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/session.c | 2 +- tools/perf/util/trace-event-info.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 604e14f6a6f9..1951e330377c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -384,7 +384,7 @@ int perf_session__process_events(struct perf_session *self, perf_event_ops__fill_defaults(ops); - page_size = getpagesize(); + page_size = sysconf(_SC_PAGESIZE); head = self->header.data_offset; diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index 407fd65b6cdb..5ea8973ad331 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -515,7 +515,7 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) write_or_die(buf, 1); /* save page_size */ - page_size = getpagesize(); + page_size = sysconf(_SC_PAGESIZE); write_or_die(&page_size, 4); read_header_files(); -- cgit v1.2.3-58-ga151 From 8d0591f6ad9edf66697ce29de176fb6f3213b9e3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 18:30:05 -0200 Subject: perf symbols: Don't try to load kallsyms if doesn't match the record build-id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now a perf.data file collected on a x86_64 fedora 12 machine gets properly parsed on a Debian testing PARISC64 machine with 32-bit userland: acme@parisc:~/git/linux-2.6-tip$ perf report 2> /dev/null | head -15 # Samples: 293085637 # # Overhead Command Shared Object Symbol # ........ ............... ................................. ...... # 35.11% find [kernel.kallsyms] [k] 0xffffffff81002b5a 18.25% perf [kernel.kallsyms] [k] 0xffffffff8102235f 9.07% find find [.] 0x0000000000fb0e 5.80% swapper [kernel.kallsyms] [k] 0xffffffff8102235f 3.29% perf libc-2.10.2.so [.] __GI_strcmp 2.70% find libc-2.10.2.so [.] __GI_memmove 2.33% init [kernel.kallsyms] [k] 0xffffffff810091b9 2.03% find libc-2.10.2.so [.] _int_malloc 1.67% find libc-2.10.2.so [.] _IO_vfprintf_internal 1.65% sshd libcrypto.so.0.9.8k [.] 
0x00000000105440 acme@parisc:~/git/linux-2.6-tip$ Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263501006-14185-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 381999dd5c1f..71d23e1e30e8 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1608,11 +1608,11 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, u8 kallsyms_build_id[BUILD_ID_SIZE]; if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, - sizeof(kallsyms_build_id)) == 0) - - is_kallsyms = dso__build_id_equal(self, kallsyms_build_id); - if (is_kallsyms) - goto do_kallsyms; + sizeof(kallsyms_build_id)) == 0) { + is_kallsyms = dso__build_id_equal(self, kallsyms_build_id); + if (is_kallsyms) + goto do_kallsyms; + } goto do_vmlinux; } @@ -1623,6 +1623,9 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, do_vmlinux: err = dso__load_vmlinux(self, map, session, self->long_name, filter); if (err <= 0) { + if (self->has_build_id) + return -1; + pr_info("The file %s cannot be used, " "trying to use /proc/kallsyms...", self->long_name); do_kallsyms: -- cgit v1.2.3-58-ga151 From 9e201442de7c954f03710ac76f28c1927d07550c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 18:30:06 -0200 Subject: perf symbols: Cache /proc/kallsyms files by build-id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that when we don't have a vmlinux handy we can store the kallsyms for later use by 'perf report'. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263501006-14185-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 2 +- tools/perf/util/header.c | 15 +++++++++++---- tools/perf/util/symbol.c | 48 +++++++++++++++++++++++++++++++++++------------- tools/perf/util/symbol.h | 5 +++-- tools/perf/util/util.c | 30 ++++++++++++++++++++++++++++++ 5 files changed, 80 insertions(+), 20 deletions(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 24ec5be4a1c0..0e9820ac4f5e 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -245,7 +245,7 @@ int event__synthesize_kernel_mmap(event__handler_t process, */ struct process_symbol_args args = { .name = symbol_name, }; - if (kallsyms__parse(&args, find_symbol_cb) <= 0) + if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0) return -ENOENT; size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index b31e0ae4b8db..1b65fed0dd2d 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -237,11 +237,13 @@ static int dso__cache_build_id(struct dso *self, const char *debugdir) char *filename = malloc(size), *linkname = malloc(size), *targetname, *sbuild_id; int len, err = -1; + bool is_kallsyms = self->kernel && self->long_name[0] != '/'; if (filename == NULL || linkname == NULL) goto out_free; - len = snprintf(filename, size, "%s%s", debugdir, self->long_name); + len = snprintf(filename, size, "%s%s%s", + debugdir, is_kallsyms ? 
"/" : "", self->long_name); if (mkdir_p(filename, 0755)) goto out_free; @@ -249,9 +251,14 @@ static int dso__cache_build_id(struct dso *self, const char *debugdir) sbuild_id = filename + len; build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); - if (access(filename, F_OK) && link(self->long_name, filename) && - copyfile(self->long_name, filename)) - goto out_free; + if (access(filename, F_OK)) { + if (is_kallsyms) { + if (copyfile("/proc/kallsyms", filename)) + goto out_free; + } else if (link(self->long_name, filename) && + copyfile(self->long_name, filename)) + goto out_free; + } len = snprintf(linkname, size, "%s/.build-id/%.2s", debugdir, sbuild_id); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 71d23e1e30e8..ae61e9f4d6eb 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -383,13 +383,14 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) return ret; } -int kallsyms__parse(void *arg, int (*process_symbol)(void *arg, const char *name, +int kallsyms__parse(const char *filename, void *arg, + int (*process_symbol)(void *arg, const char *name, char type, u64 start)) { char *line = NULL; size_t n; int err = 0; - FILE *file = fopen("/proc/kallsyms", "r"); + FILE *file = fopen(filename, "r"); if (file == NULL) goto out_failure; @@ -466,10 +467,11 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. */ -static int dso__load_all_kallsyms(struct dso *self, struct map *map) +static int dso__load_all_kallsyms(struct dso *self, const char *filename, + struct map *map) { struct process_kallsyms_args args = { .map = map, .dso = self, }; - return kallsyms__parse(&args, map__process_kallsym_symbol); + return kallsyms__parse(filename, &args, map__process_kallsym_symbol); } /* @@ -556,10 +558,10 @@ discard_symbol: rb_erase(&pos->rb_node, root); } -static int dso__load_kallsyms(struct dso *self, struct map *map, +static int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, struct perf_session *session, symbol_filter_t filter) { - if (dso__load_all_kallsyms(self, map) < 0) + if (dso__load_all_kallsyms(self, filename, map) < 0) return -1; symbols__fixup_end(&self->symbols[map->type]); @@ -1580,7 +1582,8 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, struct perf_session *session, symbol_filter_t filter) { int err; - bool is_kallsyms; + const char *kallsyms_filename = NULL; + char *kallsyms_allocated_filename = NULL; if (vmlinux_path != NULL) { int i; @@ -1606,19 +1609,37 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, */ if (self->has_build_id) { u8 kallsyms_build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, sizeof(kallsyms_build_id)) == 0) { - is_kallsyms = dso__build_id_equal(self, kallsyms_build_id); - if (is_kallsyms) + if (dso__build_id_equal(self, kallsyms_build_id)) { + kallsyms_filename = "/proc/kallsyms"; goto do_kallsyms; + } } + + build_id__sprintf(self->build_id, sizeof(self->build_id), + sbuild_id); + + if (asprintf(&kallsyms_allocated_filename, + "%s/.debug/[kernel.kallsyms]/%s", + getenv("HOME"), sbuild_id) != -1) { + if (access(kallsyms_filename, F_OK)) { + kallsyms_filename = kallsyms_allocated_filename; + goto do_kallsyms; + } + free(kallsyms_allocated_filename); + kallsyms_allocated_filename = NULL; + } + goto do_vmlinux; } - is_kallsyms 
= self->long_name[0] == '['; - if (is_kallsyms) + if (self->long_name[0] == '[') { + kallsyms_filename = "/proc/kallsyms"; goto do_kallsyms; + } do_vmlinux: err = dso__load_vmlinux(self, map, session, self->long_name, filter); @@ -1629,9 +1650,10 @@ do_vmlinux: pr_info("The file %s cannot be used, " "trying to use /proc/kallsyms...", self->long_name); do_kallsyms: - err = dso__load_kallsyms(self, map, session, filter); - if (err > 0 && !is_kallsyms) + err = dso__load_kallsyms(self, kallsyms_filename, map, session, filter); + if (err > 0 && kallsyms_filename == NULL) dso__set_long_name(self, strdup("[kernel.kallsyms]")); + free(kallsyms_allocated_filename); } if (err > 0) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 594156e43b10..36b7c717f5ee 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -144,8 +144,9 @@ int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); -int kallsyms__parse(void *arg, int (*process_symbol)(void *arg, const char *name, - char type, u64 start)); +int kallsyms__parse(const char *filename, void *arg, + int (*process_symbol)(void *arg, const char *name, + char type, u64 start)); int symbol__init(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index f3c0798a5e78..f0685849b244 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -32,6 +32,33 @@ int mkdir_p(char *path, mode_t mode) return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0; } +static int slow_copyfile(const char *from, const char *to) +{ + int err = 0; + char *line = NULL; + size_t n; + FILE *from_fp = fopen(from, "r"), *to_fp; + + if (from_fp == NULL) + goto out; + + to_fp = fopen(to, "w"); + if (to_fp == NULL) + goto out_fclose_from; + + while (getline(&line, &n, from_fp) > 0) + if (fputs(line, to_fp) == EOF) + goto out_fclose_to; + err = 0; +out_fclose_to: + fclose(to_fp); + free(line); +out_fclose_from: + fclose(from_fp); +out: + return err; +} + int copyfile(const char *from, const char *to) { int fromfd, tofd; @@ -42,6 +69,9 @@ int copyfile(const char *from, const char *to) if (stat(from, &st)) goto out; + if (st.st_size == 0) /* /proc? do it slowly... */ + return slow_copyfile(from, to); + fromfd = open(from, O_RDONLY); if (fromfd < 0) goto out; -- cgit v1.2.3-58-ga151 From cf4e5b0838e822dd404638ad00d35b63fffe8191 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 23:45:27 -0200 Subject: perf symbols: Use dso->long_name in dsos__find() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If not we end up duplicating the module DSOs because first we insert them using the short name found in /proc/modules, then, when processing synthesized MMAP events we add them again. 
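A hedged sketch of the failure mode with toy data (the structure and module path below are illustrative, not the real perf code): the module is first registered under its short /proc/modules name, the synthesized MMAP event later carries the full path, so a comparison against ->name misses and a second DSO would be created, while a comparison against ->long_name finds the existing entry:

#include <stdio.h>
#include <string.h>

struct dso { const char *name; const char *long_name; };	/* toy stand-in */

static struct dso modules[] = {
	{ "e1000e", "/lib/modules/2.6.33-rc4/kernel/drivers/net/e1000e/e1000e.ko" },
};

static struct dso *find(const char *key, int by_long_name)
{
	unsigned int i;

	for (i = 0; i < sizeof(modules) / sizeof(modules[0]); i++) {
		const char *s = by_long_name ? modules[i].long_name : modules[i].name;
		if (strcmp(s, key) == 0)
			return &modules[i];
	}
	return NULL;	/* the caller would now allocate a duplicate DSO */
}

int main(void)
{
	const char *mmap_path =
		"/lib/modules/2.6.33-rc4/kernel/drivers/net/e1000e/e1000e.ko";

	printf("match on ->name     : %s\n", find(mmap_path, 0) ? "found" : "missed");
	printf("match on ->long_name: %s\n", find(mmap_path, 1) ? "found" : "missed");
	return 0;
}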
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263519930-22803-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index ae61e9f4d6eb..4267138c7bbe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1679,7 +1679,7 @@ static struct dso *dsos__find(struct list_head *head, const char *name) struct dso *pos; list_for_each_entry(pos, head, node) - if (strcmp(pos->name, name) == 0) + if (strcmp(pos->long_name, name) == 0) return pos; return NULL; } -- cgit v1.2.3-58-ga151 From 18c3daa4961b9fa1f2db0711d93c0acf0c39fd12 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 23:45:28 -0200 Subject: perf record: Encode the domain while synthesizing MMAP events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the past 'perf record' had to process only userspace MMAP events, the ones generated in the kernel, but after we reused the MMAP events to encode the module mapings we ended up adding them first to the list of userspace DSOs (dsos__user) and to the kernel one (dsos__kernel). Fix this by encoding the header.misc field and then using it, like other parts to decide the right DSOs list to insert/find. The gotcha here is that since the kernel puts zero in .misc, which isn't PERF_RECORD_MISC_KERNEL (1 << 1), to differentiate, we put 1 in .misc. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263519930-22803-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 8 ++++++-- tools/perf/util/event.c | 11 +++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index c130df2676f1..614fa9a4c67c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -117,8 +117,12 @@ static void write_event(event_t *buf, size_t size) * Add it to the list of DSOs, so that when we finish this * record session we can pick the available build-ids. 
*/ - if (buf->header.type == PERF_RECORD_MMAP) - dsos__findnew(buf->mmap.filename); + if (buf->header.type == PERF_RECORD_MMAP) { + struct list_head *head = &dsos__user; + if (buf->mmap.header.misc == 1) + head = &dsos__kernel; + __dsos__findnew(head, buf->mmap.filename); + } write_output(buf, size); } diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 0e9820ac4f5e..1abaefc126a8 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -110,7 +110,10 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, while (1) { char bf[BUFSIZ], *pbf = bf; event_t ev = { - .header = { .type = PERF_RECORD_MMAP }, + .header = { + .type = PERF_RECORD_MMAP, + .misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */ + }, }; int n; size_t size; @@ -170,6 +173,7 @@ int event__synthesize_modules(event__handler_t process, size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); memset(&ev, 0, sizeof(ev)); + ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */ ev.mmap.header.type = PERF_RECORD_MMAP; ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size)); @@ -236,7 +240,10 @@ int event__synthesize_kernel_mmap(event__handler_t process, { size_t size; event_t ev = { - .header = { .type = PERF_RECORD_MMAP }, + .header = { + .type = PERF_RECORD_MMAP, + .misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */ + }, }; /* * We should get this from /sys/kernel/sections/.text, but till that is -- cgit v1.2.3-58-ga151 From 59ee68ecd1561a233fb6ad351980bea8402533e7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 23:45:29 -0200 Subject: perf symbols: Create thread__find_addr_map from thread__find_addr_location MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because some tools will only want to know with maps had hits, not needing the full symbol resolution done by thread__find_addr_location. 
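The shape of the split, sketched with toy types (none of these are the real perf structures; the actual change is in the diff below): map resolution is factored out so a tool that only needs to know whether a map was hit can stop after the first step, while the more expensive symbol resolution stays in the outer helper:

#include <stddef.h>

struct map { int hit; };				/* toy stand-ins */
struct symbol { const char *name; };
struct addr_location { struct map *map; struct symbol *sym; };

static struct map some_map;

static void find_addr_map(struct addr_location *al, unsigned long addr)
{
	al->map = addr ? &some_map : NULL;	/* pretend map lookup */
	if (al->map)
		al->map->hit = 1;		/* enough for "which maps had hits" users */
}

static void find_addr_location(struct addr_location *al, unsigned long addr)
{
	find_addr_map(al, addr);		/* cheap step, reusable on its own */
	al->sym = NULL;				/* stand-in for the costly symbol lookup */
}

int main(void)
{
	struct addr_location al;

	find_addr_location(&al, 0x1000);
	return al.map == NULL;
}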
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263519930-22803-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 26 +++++++++++++++++--------- tools/perf/util/thread.h | 5 +++++ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1abaefc126a8..5a6e827a09eb 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -422,11 +422,10 @@ int event__process_task(event_t *self, struct perf_session *session) return 0; } -void thread__find_addr_location(struct thread *self, - struct perf_session *session, u8 cpumode, - enum map_type type, u64 addr, - struct addr_location *al, - symbol_filter_t filter) +void thread__find_addr_map(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al) { struct map_groups *mg = &self->mg; @@ -441,7 +440,6 @@ void thread__find_addr_location(struct thread *self, else { al->level = 'H'; al->map = NULL; - al->sym = NULL; return; } try_again: @@ -460,11 +458,21 @@ try_again: mg = &session->kmaps; goto try_again; } - al->sym = NULL; - } else { + } else al->addr = al->map->map_ip(al->map, al->addr); +} + +void thread__find_addr_location(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al, + symbol_filter_t filter) +{ + thread__find_addr_map(self, session, cpumode, type, addr, al); + if (al->map != NULL) al->sym = map__find_symbol(al->map, session, al->addr, filter); - } + else + al->sym = NULL; } static void dso__calc_col_width(struct dso *self) diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index c06c13535a70..e35653c1817c 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -48,6 +48,11 @@ static inline struct map *thread__find_map(struct thread *self, return self ? map_groups__find(&self->mg, type, addr) : NULL; } +void thread__find_addr_map(struct thread *self, + struct perf_session *session, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al); + void thread__find_addr_location(struct thread *self, struct perf_session *session, u8 cpumode, enum map_type type, u64 addr, -- cgit v1.2.3-58-ga151 From 88d3d9b7c843a42cb73c55a2d13cd1041da31fb9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 14 Jan 2010 23:45:30 -0200 Subject: perf buildid-list: Introduce --with-hits option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using this option 'perf buildid-list' will process all samples, marking the DSOs that had some hits to list just them. This in turn will be used by a new porcelain, 'perf archive', that will be just a shell script to create a tarball from the 'perf buildid-list --with-hits' output and the files cached by 'perf record' in ~/.debug. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263519930-22803-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-buildid-list.c | 35 ++++++++++++++++++++++++++++++++++- tools/perf/util/symbol.c | 11 +++++++---- tools/perf/util/symbol.h | 3 ++- 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 4229c2c213cc..431f204bde64 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -16,6 +16,7 @@ static char const *input_name = "perf.data"; static int force; +static bool with_hits; static const char * const buildid_list_usage[] = { "perf buildid-list []", @@ -23,6 +24,7 @@ static const char * const buildid_list_usage[] = { }; static const struct option options[] = { + OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"), OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), @@ -31,6 +33,34 @@ static const struct option options[] = { OPT_END() }; +static int build_id_list__process_event(event_t *event, + struct perf_session *session) +{ + struct addr_location al; + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread = perf_session__findnew(session, event->ip.pid); + + if (thread == NULL) { + pr_err("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.ip, &al); + + if (al.map != NULL) + al.map->dso->hit = 1; + + return 0; +} + +static struct perf_event_ops build_id_list__event_ops = { + .sample = build_id_list__process_event, + .mmap = event__process_mmap, + .fork = event__process_task, +}; + static int __cmd_buildid_list(void) { int err = -1; @@ -40,7 +70,10 @@ static int __cmd_buildid_list(void) if (session == NULL) return -1; - dsos__fprintf_buildid(stdout); + if (with_hits) + perf_session__process_events(session, &build_id_list__event_ops); + + dsos__fprintf_buildid(stdout, with_hits); perf_session__delete(session); return err; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4267138c7bbe..a4e745934584 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1716,22 +1716,25 @@ void dsos__fprintf(FILE *fp) __dsos__fprintf(&dsos__user, fp); } -static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp) +static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, + bool with_hits) { struct dso *pos; size_t ret = 0; list_for_each_entry(pos, head, node) { + if (with_hits && !pos->hit) + continue; ret += dso__fprintf_buildid(pos, fp); ret += fprintf(fp, " %s\n", pos->long_name); } return ret; } -size_t dsos__fprintf_buildid(FILE *fp) +size_t dsos__fprintf_buildid(FILE *fp, bool with_hits) { - return (__dsos__fprintf_buildid(&dsos__kernel, fp) + - __dsos__fprintf_buildid(&dsos__user, fp)); + return (__dsos__fprintf_buildid(&dsos__kernel, fp, with_hits) + + __dsos__fprintf_buildid(&dsos__user, fp, with_hits)); } static struct dso *dsos__create_kernel(const char *vmlinux) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 36b7c717f5ee..525085fd0735 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -97,6 +97,7 @@ struct dso { u8 slen_calculated:1; u8 has_build_id:1; u8 kernel:1; + u8 hit:1; unsigned char origin; u8 
sorted_by_name; u8 loaded; @@ -129,7 +130,7 @@ struct perf_session; int dso__load(struct dso *self, struct map *map, struct perf_session *session, symbol_filter_t filter); void dsos__fprintf(FILE *fp); -size_t dsos__fprintf_buildid(FILE *fp); +size_t dsos__fprintf_buildid(FILE *fp, bool with_hits); size_t dso__fprintf_buildid(struct dso *self, FILE *fp); size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); -- cgit v1.2.3-58-ga151 From 460848fceffc91652b2d36d19db4ac40d12fb607 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 15 Jan 2010 13:17:51 -0200 Subject: perf symbols: The synthesized kernel modules MMAP must use the pathnames MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we use ->long_name in dsos__find now. Now 'perf buildid_list' is not duplicating those and managing to show the proper build-ids for the DSOs with hits: [root@doppio linux-2.6-tip]# perf buildid-list -H 74f9930ee94475b6b3238caf3725a50d59cb994b [kernel.kallsyms] 9ffdcac0a7935922d1f04b6cc9029dfef0f066ef /lib/modules/2.6.33-rc4-tip+/kernel/arch/x86/crypto/aes-x86_64.ko 3aaf89c32ebfc438ff546c93597d41788e3e65f3 /lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/wireless/iwlwifi/iwl3945.ko 19f46033f73e1ec612937189bb118c5daba5a0c8 /lib/modules/2.6.33-rc4-tip+/kernel/net/mac80211/mac80211.ko 1772f014a7a7272859655acb0c64a20ab20b75ee /lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/e1000e/e1000e.ko eb4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 /lib64/libc-2.10.2.so 5c68f7afeb33309c78037e374b0deee84dd441f6 /lib64/libpthread-2.10.2.so e9c9ad5c138ef882e4507d2605645b597da43873 /bin/dbus-daemon bcda7d09eb6c9ee380dae0ed3d591d4311decc31 /lib64/libdbus-1.so.3.4.0 7cc449a77f48b85d6088114000e970ced613bed8 /usr/lib64/libcrypto.so.0.9.8k fdd1ccd1ff7917ab020653147ab3bacf0a85b5b9 /lib64/libglib-2.0.so.0.2000.5 e4417ebb8762e5f2eee93c8011a71115ff5edad8 /lib64/libgobject-2.0.so.0.2000.5 931e49461f6df99104f0febcc52f6fed5e2efce6 /usr/sbin/sshd dab5f724c088f89fbd8304da553ed6cb30bbec96 /usr/lib64/libgdk-x11-2.0.so.0.1600.6 f2037a091ef36b591187a858d75e203690ea9409 /usr/sbin/openvpn a8e4f743b40fb1fd8b85e2f9b88d93b661472b8f /bin/find 81120aada06e68b1e85882925a0fc6d7345ef59a /home/acme/bin/perf [root@doppio linux-2.6-tip]# Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263568672-30323-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 5a6e827a09eb..966d207a1509 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -345,15 +345,15 @@ int event__process_mmap(event_t *self, struct perf_session *session) map = perf_session__new_module_map(session, self->mmap.start, - short_module_name); + self->mmap.filename); if (map == NULL) goto out_problem; - name = strdup(self->mmap.filename); + name = strdup(short_module_name); if (name == NULL) goto out_problem; - dso__set_long_name(map->dso, name); + map->dso->short_name = name; map->end = map->start + self->mmap.len; } else if (memcmp(self->mmap.filename, kmmap_prefix, sizeof(kmmap_prefix) - 1) == 0) { -- cgit v1.2.3-58-ga151 From 2c5851747bcf751908c02e253cb7582d342b4612 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 15 Jan 2010 13:17:52 -0200 Subject: perf archive: Add helper script to package files needed to do analysis MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It uses 'perf buildid-list --with-hits' to create a tarball with what is needed to have in the destination machine ~/.debug hierarchy to properly decode the perf.data file specified. Here is an example where a perf.data file collected on a x86-64 machine running Fedora 12 is used and then the data is packaged, transferred and decoded on a PARISC64 machine running Debian Testing, 32-bit userspace: [root@doppio linux-2.6-tip]# uname -a Linux doppio.ghostprotocols.net 2.6.33-rc4-tip+ #3 SMP Wed Jan 13 11:58:15 BRST 2010 x86_64 x86_64 x86_64 GNU/Linux [root@doppio linux-2.6-tip]# perf archive [root@doppio linux-2.6-tip]# ls -la perf.data* -rw------- 1 root root 737696 2010-01-14 23:36 perf.data -rw-r--r-- 1 root root 8840025 2010-01-15 12:27 perf.data.tar.bz2 [root@doppio linux-2.6-tip]# scp perf.data.* parisc64:. Password: perf.data.tar.bz2 100% 8633KB 1.4MB/s 00:06 [root@doppio linux-2.6-tip]# ssh parisc64 Password: Linux parisc 2.6.19-g2bbf29ac-dirty #1 Sun Dec 3 17:24:04 BRST 2006 parisc64 The programs included with the Debian GNU/Linux system are free software; the exact distribution terms for each program are described in the individual files in /usr/share/doc/*/copyright. Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent permitted by applicable law. Last login: Thu Jan 14 11:23:24 2010 from d parisc:~# uname -a Linux parisc 2.6.19-g2bbf29ac-dirty #1 Sun Dec 3 17:24:04 BRST 2006 parisc64 GNU/Linux parisc:~# mkdir .debug parisc:~# tar xvf perf.data.tar.bz2 -C ~/.debug tar: Record size = 8 blocks .build-id/74/f9930ee94475b6b3238caf3725a50d59cb994b [kernel.kallsyms]/74f9930ee94475b6b3238caf3725a50d59cb994b .build-id/9f/fdcac0a7935922d1f04b6cc9029dfef0f066ef lib/modules/2.6.33-rc4-tip+/kernel/arch/x86/crypto/aes-x86_64.ko/9ffdcac0a7935922d1f04b6cc9029dfef0f066ef .build-id/3a/af89c32ebfc438ff546c93597d41788e3e65f3 lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/wireless/iwlwifi/iwl3945.ko/3aaf89c32ebfc438ff546c93597d41788e3e65f3 .build-id/19/f46033f73e1ec612937189bb118c5daba5a0c8 lib/modules/2.6.33-rc4-tip+/kernel/net/mac80211/mac80211.ko/19f46033f73e1ec612937189bb118c5daba5a0c8 .build-id/17/72f014a7a7272859655acb0c64a20ab20b75ee lib/modules/2.6.33-rc4-tip+/kernel/drivers/net/e1000e/e1000e.ko/1772f014a7a7272859655acb0c64a20ab20b75ee .build-id/eb/4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 lib64/libc-2.10.2.so/eb4ec8fa8b2a5eb18cad173c92f27ed8887ed1c1 .build-id/5c/68f7afeb33309c78037e374b0deee84dd441f6 lib64/libpthread-2.10.2.so/5c68f7afeb33309c78037e374b0deee84dd441f6 .build-id/e9/c9ad5c138ef882e4507d2605645b597da43873 bin/dbus-daemon/e9c9ad5c138ef882e4507d2605645b597da43873 .build-id/bc/da7d09eb6c9ee380dae0ed3d591d4311decc31 lib64/libdbus-1.so.3.4.0/bcda7d09eb6c9ee380dae0ed3d591d4311decc31 .build-id/7c/c449a77f48b85d6088114000e970ced613bed8 usr/lib64/libcrypto.so.0.9.8k/7cc449a77f48b85d6088114000e970ced613bed8 .build-id/fd/d1ccd1ff7917ab020653147ab3bacf0a85b5b9 lib64/libglib-2.0.so.0.2000.5/fdd1ccd1ff7917ab020653147ab3bacf0a85b5b9 .build-id/e4/417ebb8762e5f2eee93c8011a71115ff5edad8 lib64/libgobject-2.0.so.0.2000.5/e4417ebb8762e5f2eee93c8011a71115ff5edad8 .build-id/93/1e49461f6df99104f0febcc52f6fed5e2efce6 usr/sbin/sshd/931e49461f6df99104f0febcc52f6fed5e2efce6 .build-id/da/b5f724c088f89fbd8304da553ed6cb30bbec96 usr/lib64/libgdk-x11-2.0.so.0.1600.6/dab5f724c088f89fbd8304da553ed6cb30bbec96 .build-id/f2/037a091ef36b591187a858d75e203690ea9409 usr/sbin/openvpn/f2037a091ef36b591187a858d75e203690ea9409 
.build-id/a8/e4f743b40fb1fd8b85e2f9b88d93b661472b8f bin/find/a8e4f743b40fb1fd8b85e2f9b88d93b661472b8f .build-id/81/120aada06e68b1e85882925a0fc6d7345ef59a home/acme/bin/perf/81120aada06e68b1e85882925a0fc6d7345ef59a parisc:~# perf report 2> /dev/null | head -25 9.07% find find [.] 0x0000000000fb0e 3.29% perf libc-2.10.2.so [.] __GI_strcmp 3.19% find [kernel.kallsyms] [k] _raw_spin_unlock_irqrestore 2.70% find libc-2.10.2.so [.] __GI_memmove 2.62% perf [kernel.kallsyms] [k] vsnprintf 2.03% find libc-2.10.2.so [.] _int_malloc 2.02% perf [kernel.kallsyms] [k] format_decode 1.70% find [kernel.kallsyms] [k] n_tty_write 1.70% find [kernel.kallsyms] [k] half_md4_transform 1.67% find libc-2.10.2.so [.] _IO_vfprintf_internal 1.66% perf [kernel.kallsyms] [k] audit_free_aux 1.62% swapper [kernel.kallsyms] [k] mwait_idle_with_hints 1.58% find [kernel.kallsyms] [k] __kmalloc 1.35% find [kernel.kallsyms] [k] sched_clock_local 1.35% find [kernel.kallsyms] [k] ext4_check_dir_entry 1.35% find [kernel.kallsyms] [k] ext4_htree_store_dirent 1.35% find [kernel.kallsyms] [k] sys_write 1.35% find [e1000e] [k] e1000_clean 1.35% find [kernel.kallsyms] [k] _atomic_dec_and_lock 1.34% find [kernel.kallsyms] [k] __d_lookup parisc:~# Probably the next step is to have 'perf report' notice that there is a perf.data.tar.bz2 file in the same directory and look if it was already added to ~/.debug/. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263568672-30323-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 10 ++-------- tools/perf/command-list.txt | 1 + tools/perf/perf-archive.sh | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 tools/perf/perf-archive.sh diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2c03a9411317..d739552036d0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -286,11 +286,7 @@ SCRIPT_PERL = SCRIPT_SH = TEST_PROGRAMS = -# -# No scripts right now: -# - -# SCRIPT_SH += perf-am.sh +SCRIPT_SH += perf-archive.sh # # No Perl scripts right now: @@ -315,9 +311,7 @@ PROGRAMS += perf # List built-in command $C whose implementation cmd_$C() is not in # builtin-$C.o but is linked in as part of some other command. # -# None right now: -# -# BUILT_INS += perf-init $X +BUILT_INS += perf-archive # what 'all' will build and 'install' will install, in perfexecdir ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 71dc7c3fe7b2..f73d1d90f5bd 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -3,6 +3,7 @@ # command name category [deprecated] [common] # perf-annotate mainporcelain common +perf-archive mainporcelain perf-bench mainporcelain common perf-buildid-list mainporcelain common perf-diff mainporcelain common diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh new file mode 100644 index 000000000000..45fbe2f07b15 --- /dev/null +++ b/tools/perf/perf-archive.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# perf archive +# Arnaldo Carvalho de Melo + +PERF_DATA=perf.data +if [ $# -ne 0 ] ; then + PERF_DATA=$1 +fi + +DEBUGDIR=~/.debug/ +BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX) + +perf buildid-list -i $PERF_DATA --with-hits > $BUILDIDS +if [ ! 
-s $BUILDIDS ] ; then + echo "perf archive: no build-ids found" + rm -f $BUILDIDS + exit 1 +fi + +MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX) + +cut -d ' ' -f 1 $BUILDIDS | \ +while read build_id ; do + linkname=$DEBUGDIR.build-id/${build_id:0:2}/${build_id:2} + filename=$(readlink -f $linkname) + echo ${linkname#$DEBUGDIR} >> $MANIFEST + echo ${filename#$DEBUGDIR} >> $MANIFEST +done + +tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST +rm -f $MANIFEST $BUILDIDS +exit 0 -- cgit v1.2.3-58-ga151 From f5a2c3dce03621b55f84496f58adc2d1a87ca16f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 15 Jan 2010 18:08:26 -0200 Subject: perf record: Intercept all events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The event interception we need to do in 'perf record' to create a list of all DSOs in PERF_RECORD_MMAP events wasn't seeing all events, make sure that happens by checking size agains event_t->header.size. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263586107-1756-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 614fa9a4c67c..7bb9ca1b30fa 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -113,16 +113,24 @@ static void write_output(void *buf, size_t size) static void write_event(event_t *buf, size_t size) { - /* - * Add it to the list of DSOs, so that when we finish this - * record session we can pick the available build-ids. - */ - if (buf->header.type == PERF_RECORD_MMAP) { - struct list_head *head = &dsos__user; - if (buf->mmap.header.misc == 1) - head = &dsos__kernel; - __dsos__findnew(head, buf->mmap.filename); - } + size_t processed_size = buf->header.size; + event_t *ev = buf; + + do { + /* + * Add it to the list of DSOs, so that when we finish this + * record session we can pick the available build-ids. 
+ */ + if (ev->header.type == PERF_RECORD_MMAP) { + struct list_head *head = &dsos__user; + if (ev->header.misc == 1) + head = &dsos__kernel; + __dsos__findnew(head, ev->mmap.filename); + } + + ev = ((void *)ev) + ev->header.size; + processed_size += ev->header.size; + } while (processed_size < size); write_output(buf, size); } -- cgit v1.2.3-58-ga151 From 881516eb828a3f7276c378bcef96b7788fc99016 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 15 Jan 2010 18:08:27 -0200 Subject: perf symbols: Accept an alias when looking for "_text" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As it is in PARISC64: parisc:~# uname -a Linux parisc 2.6.33-rc4-tip+ #1 SMP Thu Jan 14 13:33:34 BRST 2010 parisc64 GNU/Linux parisc:~# grep -w _text /proc/kallsyms 0000000040100000 A _text parisc:~# grep 0000000040100000 /proc/kallsyms 0000000040100000 T stext 0000000040100000 T _stext 0000000040100000 A _text parisc:~# Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263586107-1756-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 966d207a1509..dc13cad828d7 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -227,7 +227,12 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start) { struct process_symbol_args *args = arg; - if (!symbol_type__is_a(type, MAP__FUNCTION) || strcmp(name, args->name)) + /* + * Must be a function or at least an alias, as in PARISC64, where "_text" is + * an 'A' to the same address as "_stext". + */ + if (!(symbol_type__is_a(type, MAP__FUNCTION) || + type == 'A') || strcmp(name, args->name)) return 0; args->start = start; -- cgit v1.2.3-58-ga151 From 889ff0150661512d79484219612b7e2e024b6c07 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 9 Jan 2010 20:04:47 +0100 Subject: perf/core: Split context's event group list into pinned and non-pinned lists Split-up struct perf_event_context::group_list into pinned_groups and flexible_groups (non-pinned). This first appears to be useless as it duplicates various loops around the group list handlings. But it scales better in the fast-path in perf_sched_in(). We don't anymore iterate twice through the entire list to separate pinned and non-pinned scheduling. Instead we interate through two distinct lists. The another desired effect is that it makes easier to define distinct scheduling rules on both. Changes in v2: - Respectively rename pinned_grp_list and volatile_grp_list into pinned_groups and flexible_groups as per Ingo suggestion. 
- Various cleanups Signed-off-by: Frederic Weisbecker Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 3 +- kernel/perf_event.c | 227 ++++++++++++++++++++++++++++++--------------- 2 files changed, 153 insertions(+), 77 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9a1d276db754..cdbc2aa64a0b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -683,7 +683,8 @@ struct perf_event_context { */ struct mutex mutex; - struct list_head group_list; + struct list_head pinned_groups; + struct list_head flexible_groups; struct list_head event_list; int nr_events; int nr_active; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 27f69a04541d..c9f8a757649d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -289,6 +289,15 @@ static void update_event_times(struct perf_event *event) event->total_time_running = run_end - event->tstamp_running; } +static struct list_head * +ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) +{ + if (event->attr.pinned) + return &ctx->pinned_groups; + else + return &ctx->flexible_groups; +} + /* * Add a event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. @@ -303,9 +312,12 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) * add it straight to the context's event list, or to the group * leader's sibling list: */ - if (group_leader == event) - list_add_tail(&event->group_entry, &ctx->group_list); - else { + if (group_leader == event) { + struct list_head *list; + + list = ctx_group_list(event, ctx); + list_add_tail(&event->group_entry, list); + } else { list_add_tail(&event->group_entry, &group_leader->sibling_list); group_leader->nr_siblings++; } @@ -355,8 +367,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) * to the context list directly: */ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { + struct list_head *list; - list_move_tail(&sibling->group_entry, &ctx->group_list); + list = ctx_group_list(event, ctx); + list_move_tail(&sibling->group_entry, list); sibling->group_leader = sibling; } } @@ -1056,7 +1070,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx, perf_disable(); if (ctx->nr_active) { - list_for_each_entry(event, &ctx->group_list, group_entry) + list_for_each_entry(event, &ctx->pinned_groups, group_entry) + group_sched_out(event, cpuctx, ctx); + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); } perf_enable(); @@ -1271,9 +1288,8 @@ __perf_event_sched_in(struct perf_event_context *ctx, * First go through the list and put on any pinned groups * in order to give them the best chance of going on. */ - list_for_each_entry(event, &ctx->group_list, group_entry) { - if (event->state <= PERF_EVENT_STATE_OFF || - !event->attr.pinned) + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + if (event->state <= PERF_EVENT_STATE_OFF) continue; if (event->cpu != -1 && event->cpu != cpu) continue; @@ -1291,15 +1307,10 @@ __perf_event_sched_in(struct perf_event_context *ctx, } } - list_for_each_entry(event, &ctx->group_list, group_entry) { - /* - * Ignore events in OFF or ERROR state, and - * ignore pinned events since we did them already. 
- */ - if (event->state <= PERF_EVENT_STATE_OFF || - event->attr.pinned) + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { + /* Ignore events in OFF or ERROR state */ + if (event->state <= PERF_EVENT_STATE_OFF) continue; - /* * Listen to the 'cpu' scheduling filter constraint * of events: @@ -1453,8 +1464,13 @@ static void rotate_ctx(struct perf_event_context *ctx) * Rotate the first entry last (works just fine for group events too): */ perf_disable(); - list_for_each_entry(event, &ctx->group_list, group_entry) { - list_move_tail(&event->group_entry, &ctx->group_list); + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + list_move_tail(&event->group_entry, &ctx->pinned_groups); + break; + } + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { + list_move_tail(&event->group_entry, &ctx->flexible_groups); break; } perf_enable(); @@ -1490,6 +1506,21 @@ void perf_event_task_tick(struct task_struct *curr) perf_event_task_sched_in(curr); } +static int event_enable_on_exec(struct perf_event *event, + struct perf_event_context *ctx) +{ + if (!event->attr.enable_on_exec) + return 0; + + event->attr.enable_on_exec = 0; + if (event->state >= PERF_EVENT_STATE_INACTIVE) + return 0; + + __perf_event_mark_enabled(event, ctx); + + return 1; +} + /* * Enable all of a task's events that have been marked enable-on-exec. * This expects task == current. @@ -1500,6 +1531,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) struct perf_event *event; unsigned long flags; int enabled = 0; + int ret; local_irq_save(flags); ctx = task->perf_event_ctxp; @@ -1510,14 +1542,16 @@ static void perf_event_enable_on_exec(struct task_struct *task) raw_spin_lock(&ctx->lock); - list_for_each_entry(event, &ctx->group_list, group_entry) { - if (!event->attr.enable_on_exec) - continue; - event->attr.enable_on_exec = 0; - if (event->state >= PERF_EVENT_STATE_INACTIVE) - continue; - __perf_event_mark_enabled(event, ctx); - enabled = 1; + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + ret = event_enable_on_exec(event, ctx); + if (ret) + enabled = 1; + } + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { + ret = event_enable_on_exec(event, ctx); + if (ret) + enabled = 1; } /* @@ -1591,7 +1625,8 @@ __perf_event_init_context(struct perf_event_context *ctx, { raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); - INIT_LIST_HEAD(&ctx->group_list); + INIT_LIST_HEAD(&ctx->pinned_groups); + INIT_LIST_HEAD(&ctx->flexible_groups); INIT_LIST_HEAD(&ctx->event_list); atomic_set(&ctx->refcount, 1); ctx->task = task; @@ -5032,7 +5067,11 @@ void perf_event_exit_task(struct task_struct *child) mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); again: - list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, + list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, + group_entry) + __perf_event_exit_task(child_event, child_ctx, child); + + list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, group_entry) __perf_event_exit_task(child_event, child_ctx, child); @@ -5041,7 +5080,8 @@ again: * its siblings to the list, but we obtained 'tmp' before that which * will still point to the list head terminating the iteration. 
*/ - if (!list_empty(&child_ctx->group_list)) + if (!list_empty(&child_ctx->pinned_groups) || + !list_empty(&child_ctx->flexible_groups)) goto again; mutex_unlock(&child_ctx->mutex); @@ -5049,6 +5089,24 @@ again: put_ctx(child_ctx); } +static void perf_free_event(struct perf_event *event, + struct perf_event_context *ctx) +{ + struct perf_event *parent = event->parent; + + if (WARN_ON_ONCE(!parent)) + return; + + mutex_lock(&parent->child_mutex); + list_del_init(&event->child_list); + mutex_unlock(&parent->child_mutex); + + fput(parent->filp); + + list_del_event(event, ctx); + free_event(event); +} + /* * free an unexposed, unused context as created by inheritance by * init_task below, used by fork() in case of fail. @@ -5063,36 +5121,70 @@ void perf_event_free_task(struct task_struct *task) mutex_lock(&ctx->mutex); again: - list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { - struct perf_event *parent = event->parent; + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) + perf_free_event(event, ctx); - if (WARN_ON_ONCE(!parent)) - continue; + list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, + group_entry) + perf_free_event(event, ctx); - mutex_lock(&parent->child_mutex); - list_del_init(&event->child_list); - mutex_unlock(&parent->child_mutex); + if (!list_empty(&ctx->pinned_groups) || + !list_empty(&ctx->flexible_groups)) + goto again; - fput(parent->filp); + mutex_unlock(&ctx->mutex); - list_del_event(event, ctx); - free_event(event); + put_ctx(ctx); +} + +static int +inherit_task_group(struct perf_event *event, struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + int *inherited_all) +{ + int ret; + struct perf_event_context *child_ctx = child->perf_event_ctxp; + + if (!event->attr.inherit) { + *inherited_all = 0; + return 0; } - if (!list_empty(&ctx->group_list)) - goto again; + if (!child_ctx) { + /* + * This is executed from the parent task context, so + * inherit events that have been marked for cloning. + * First allocate and initialize a context for the + * child. + */ - mutex_unlock(&ctx->mutex); + child_ctx = kzalloc(sizeof(struct perf_event_context), + GFP_KERNEL); + if (!child_ctx) + return -ENOMEM; - put_ctx(ctx); + __perf_event_init_context(child_ctx, child); + child->perf_event_ctxp = child_ctx; + get_task_struct(child); + } + + ret = inherit_group(event, parent, parent_ctx, + child, child_ctx); + + if (ret) + *inherited_all = 0; + + return ret; } + /* * Initialize the perf_event context in task_struct */ int perf_event_init_task(struct task_struct *child) { - struct perf_event_context *child_ctx = NULL, *parent_ctx; + struct perf_event_context *child_ctx, *parent_ctx; struct perf_event_context *cloned_ctx; struct perf_event *event; struct task_struct *parent = current; @@ -5130,41 +5222,22 @@ int perf_event_init_task(struct task_struct *child) * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: */ - list_for_each_entry(event, &parent_ctx->group_list, group_entry) { - - if (!event->attr.inherit) { - inherited_all = 0; - continue; - } - - if (!child->perf_event_ctxp) { - /* - * This is executed from the parent task context, so - * inherit events that have been marked for cloning. - * First allocate and initialize a context for the - * child. 
- */ - - child_ctx = kzalloc(sizeof(struct perf_event_context), - GFP_KERNEL); - if (!child_ctx) { - ret = -ENOMEM; - break; - } - - __perf_event_init_context(child_ctx, child); - child->perf_event_ctxp = child_ctx; - get_task_struct(child); - } + list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { + ret = inherit_task_group(event, parent, parent_ctx, child, + &inherited_all); + if (ret) + break; + } - ret = inherit_group(event, parent, parent_ctx, - child, child_ctx); - if (ret) { - inherited_all = 0; + list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { + ret = inherit_task_group(event, parent, parent_ctx, child, + &inherited_all); + if (ret) break; - } } + child_ctx = child->perf_event_ctxp; + if (child_ctx && inherited_all) { /* * Mark the child context as a clone of the parent @@ -5213,7 +5286,9 @@ static void __perf_event_exit_cpu(void *info) struct perf_event_context *ctx = &cpuctx->ctx; struct perf_event *event, *tmp; - list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) + __perf_event_remove_from_context(event); + list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) __perf_event_remove_from_context(event); } static void perf_event_exit_cpu(int cpu) -- cgit v1.2.3-58-ga151 From 5908cdc85eb30f8d07f2cb11d4a62334d7229048 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 9 Jan 2010 20:53:14 +0100 Subject: list: Introduce list_rotate_left() Bring a new list_rotate_left() helper that rotates a list to the left. This is useful for codes that need to round roubin elements which queue priority increases from tail to head. Signed-off-by: Frederic Weisbecker Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- include/linux/list.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/linux/list.h b/include/linux/list.h index 969f6e92d089..5d9c6558e8ab 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -205,6 +205,20 @@ static inline int list_empty_careful(const struct list_head *head) return (next == head) && (next == head->prev); } +/** + * list_rotate_left - rotate the list to the left + * @head: the head of the list + */ +static inline void list_rotate_left(struct list_head *head) +{ + struct list_head *first; + + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } +} + /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. -- cgit v1.2.3-58-ga151 From e286417378b4f9ce6e473b556193465ab22e12ab Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 9 Jan 2010 21:05:28 +0100 Subject: perf: Round robin flexible groups of events using list_rotate_left() This is more proper that doing it through a list_for_each_entry() that breaks after the first entry. v2: Don't rotate pinned groups as its not needed to time share them. 
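A hedged userspace sketch of what the rotation amounts to (a plain array stands in for the kernel's struct list_head, so this is not the helper itself): the head element moves to the tail on every tick, which is exactly the round-robin behaviour wanted for the flexible groups:

#include <stdio.h>

/* Rotate left: the first element becomes the last, the rest shift up by one. */
static void rotate_left(int *v, int n)
{
	int first, i;

	if (n < 2)
		return;
	first = v[0];
	for (i = 0; i < n - 1; i++)
		v[i] = v[i + 1];
	v[n - 1] = first;
}

int main(void)
{
	int groups[] = { 1, 2, 3, 4 };	/* pretend flexible groups in scheduling order */
	int tick, i;

	for (tick = 0; tick < 3; tick++) {
		rotate_left(groups, 4);
		for (i = 0; i < 4; i++)
			printf("%d ", groups[i]);
		printf("\n");		/* prints 2 3 4 1, then 3 4 1 2, then 4 1 2 3 */
	}
	return 0;
}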
Signed-off-by: Frederic Weisbecker Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c9f8a757649d..bbebe2832639 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1454,25 +1454,16 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ static void rotate_ctx(struct perf_event_context *ctx) { - struct perf_event *event; - if (!ctx->nr_events) return; raw_spin_lock(&ctx->lock); - /* - * Rotate the first entry last (works just fine for group events too): - */ + + /* Rotate the first entry last of non-pinned groups */ perf_disable(); - list_for_each_entry(event, &ctx->pinned_groups, group_entry) { - list_move_tail(&event->group_entry, &ctx->pinned_groups); - break; - } - list_for_each_entry(event, &ctx->flexible_groups, group_entry) { - list_move_tail(&event->group_entry, &ctx->flexible_groups); - break; - } + list_rotate_left(&ctx->flexible_groups); + perf_enable(); raw_spin_unlock(&ctx->lock); -- cgit v1.2.3-58-ga151 From d6f962b57bfaab62891c7abbf1469212a56d6103 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 10 Jan 2010 01:25:51 +0100 Subject: perf: Export software-only event group characteristic as a flag Before scheduling an event group, we first check if a group can go on. We first check if the group is made of software only events first, in which case it is enough to know if the group can be scheduled in. For that purpose, we iterate through the whole group, which is wasteful as we could do this check when we add/delete an event to a group. So we create a group_flags field in perf event that can host characteristics from a group of events, starting with a first PERF_GROUP_SOFTWARE flag that reduces the check on the fast path. 
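For illustration only (not part of this patch): the invariant is that PERF_GROUP_SOFTWARE is set on a group leader if and only if every event in the group is a software event -- it is set when a software leader is added and cleared as soon as a hardware sibling joins -- so the scheduling check collapses to a single bit test instead of a sibling-list walk:

    /* Sketch of the resulting fast path (hypothetical helper name). */
    static inline int group_is_software_only(struct perf_event *group)
    {
            return group->group_flags & PERF_GROUP_SOFTWARE;
    }
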
Signed-off-by: Frederic Weisbecker Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 5 +++++ kernel/perf_event.c | 30 +++++++++++------------------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index cdbc2aa64a0b..c6f812e4d058 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -565,6 +565,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, struct perf_sample_data *, struct pt_regs *regs); +enum perf_group_flag { + PERF_GROUP_SOFTWARE = 0x1, +}; + /** * struct perf_event - performance event kernel representation: */ @@ -574,6 +578,7 @@ struct perf_event { struct list_head event_entry; struct list_head sibling_list; int nr_siblings; + int group_flags; struct perf_event *group_leader; struct perf_event *output; const struct pmu *pmu; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index bbebe2832639..eae6ff693604 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -315,9 +315,16 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) if (group_leader == event) { struct list_head *list; + if (is_software_event(event)) + event->group_flags |= PERF_GROUP_SOFTWARE; + list = ctx_group_list(event, ctx); list_add_tail(&event->group_entry, list); } else { + if (group_leader->group_flags & PERF_GROUP_SOFTWARE && + !is_software_event(event)) + group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; + list_add_tail(&event->group_entry, &group_leader->sibling_list); group_leader->nr_siblings++; } @@ -372,6 +379,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) list = ctx_group_list(event, ctx); list_move_tail(&sibling->group_entry, list); sibling->group_leader = sibling; + + /* Inherit group flags from the previous leader */ + sibling->group_flags = event->group_flags; } } @@ -699,24 +709,6 @@ group_error: return -EAGAIN; } -/* - * Return 1 for a group consisting entirely of software events, - * 0 if the group contains any hardware events. - */ -static int is_software_only_group(struct perf_event *leader) -{ - struct perf_event *event; - - if (!is_software_event(leader)) - return 0; - - list_for_each_entry(event, &leader->sibling_list, group_entry) - if (!is_software_event(event)) - return 0; - - return 1; -} - /* * Work out whether we can put this event group on the CPU now. */ @@ -727,7 +719,7 @@ static int group_can_go_on(struct perf_event *event, /* * Groups consisting entirely of software events can always go on. */ - if (is_software_only_group(event)) + if (event->group_flags & PERF_GROUP_SOFTWARE) return 1; /* * If an exclusive group is already on, no other hardware -- cgit v1.2.3-58-ga151 From 69e3f52d1b1a3ed4390bb8a09bb1324265af7fbf Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 16 Jan 2010 14:21:15 +0100 Subject: perf: Fix implicit declaration of getline in util.c getline() is considered as undeclared in util/util.c because it includes string.h, that in turn includes stdio.h, without having defined _GNU_SOURCE. But util.c also includes util.h that handles the _GNU_SOURCE and all the needed inclusions already. 
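As a stand-alone illustration of the underlying rule (not part of this patch): getline() is a GNU/POSIX.1-2008 extension, so the feature-test macro must be visible before the first inclusion of <stdio.h>; defining it afterwards has no effect.

    #define _GNU_SOURCE             /* must precede the first <stdio.h> */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *line = NULL;
            size_t len = 0;

            if (getline(&line, &len, stdin) != -1)  /* now declared */
                    printf("read: %s", line);
            free(line);
            return 0;
    }
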
Let's include only util.h and sys/mman.h which is the only one header not handled by util.h This fixes the following build error: util/util.c: In function 'slow_copyfile': util/util.c:49: erreur: implicit declaration of function 'getline' util/util.c:49: erreur: nested extern declaration of 'getline' Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras LKML-Reference: <1263648075-3858-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/util.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index f0685849b244..f9b890fde681 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -1,10 +1,5 @@ -#include -#include -#include -#include -#include -#include #include "util.h" +#include int mkdir_p(char *path, mode_t mode) { -- cgit v1.2.3-58-ga151 From 0eda7385db1f30271ade830a231006938a76fb53 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 16 Jan 2010 21:31:16 +0900 Subject: perf probe: Fix build error of builtin-probe.c I got this build error when building tip tree: | cc1: warnings being treated as errors | builtin-probe.c:123: error: 'opt_show_lines' defined but not used This error is caused by: | #ifndef NO_LIBDWARF | OPT_CALLBACK('L', "line", NULL, | "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]", | "Show source code lines.", opt_show_lines), | #endif My environment defines NO_LIBDWARF, so gcc treated opt_show_lines() as garbage. So I moved opt_show_lines() into #ifndef NO_LIBDWARF ... #endif block. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Masami Hiramatsu Cc: Mike Galbraith LKML-Reference: <1263645076-9993-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 1d3a99ea5ce1..34f2acb1cc88 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -118,15 +118,6 @@ static int opt_del_probe_event(const struct option *opt __used, return 0; } -static int opt_show_lines(const struct option *opt __used, - const char *str, int unset __used) -{ - if (str) - parse_line_range_desc(str, &session.line_range); - INIT_LIST_HEAD(&session.line_range.line_list); - session.show_lines = true; - return 0; -} /* Currently just checking function name from symbol map */ static void evaluate_probe_point(struct probe_point *pp) { @@ -148,6 +139,16 @@ static int open_vmlinux(void) pr_debug("Try to open %s\n", session.kmap->dso->long_name); return open(session.kmap->dso->long_name, O_RDONLY); } + +static int opt_show_lines(const struct option *opt __used, + const char *str, int unset __used) +{ + if (str) + parse_line_range_desc(str, &session.line_range); + INIT_LIST_HEAD(&session.line_range.line_list); + session.show_lines = true; + return 0; +} #endif static const char * const probe_usage[] = { -- cgit v1.2.3-58-ga151 From 231e36f4d2e63dd770db80b9f5113310c2bcfcfd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 14 Jan 2010 00:12:12 -0500 Subject: tracing/kprobe: Update kprobe tracing self test for new syntax Update kprobe tracing self test for new syntax (it supports deleting individual probes, and drops $argN support) and behavior change (new probes are disabled in default). 
This selftest includes the following checks: - Adding function-entry probe and return probe with arguments. - Enabling these probes. - Deleting it individually. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <20100114051211.7814.29436.stgit@localhost6.localdomain6> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 55 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 8 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7ac728ded964..d6266cad6953 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1507,28 +1507,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3, static __init int kprobe_trace_self_tests_init(void) { - int ret; + int ret, warn = 0; int (*target)(int, int, int, int, int, int); + struct trace_probe *tp; target = kprobe_trace_selftest_target; pr_info("Testing kprobe tracing: "); ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " - "$arg1 $arg2 $arg3 $arg4 $stack $stack0"); - if (WARN_ON_ONCE(ret)) - pr_warning("error enabling function entry\n"); + "$stack $stack0 +0($stack)"); + if (WARN_ON_ONCE(ret)) { + pr_warning("error on probing function entry.\n"); + warn++; + } else { + /* Enable trace point */ + tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM); + if (WARN_ON_ONCE(tp == NULL)) { + pr_warning("error on getting new probe.\n"); + warn++; + } else + probe_event_enable(&tp->call); + } ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " "$retval"); - if (WARN_ON_ONCE(ret)) - pr_warning("error enabling function return\n"); + if (WARN_ON_ONCE(ret)) { + pr_warning("error on probing function return.\n"); + warn++; + } else { + /* Enable trace point */ + tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM); + if (WARN_ON_ONCE(tp == NULL)) { + pr_warning("error on getting new probe.\n"); + warn++; + } else + probe_event_enable(&tp->call); + } + + if (warn) + goto end; ret = target(1, 2, 3, 4, 5, 6); - cleanup_all_probes(); + ret = command_trace_probe("-:testprobe"); + if (WARN_ON_ONCE(ret)) { + pr_warning("error on deleting a probe.\n"); + warn++; + } + + ret = command_trace_probe("-:testprobe2"); + if (WARN_ON_ONCE(ret)) { + pr_warning("error on deleting a probe.\n"); + warn++; + } - pr_cont("OK\n"); +end: + cleanup_all_probes(); + if (warn) + pr_cont("NG: Some tests are failed. Please check them.\n"); + else + pr_cont("OK\n"); return 0; } -- cgit v1.2.3-58-ga151 From 42cce92f4ddfa41e2dfe26fdcad4887943c032f2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 17 Jan 2010 10:36:08 +0100 Subject: perf: Make __perf_event_sched_out static __perf_event_sched_out doesn't need to be globally available, make it static. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index eae6ff693604..c4e90b8cd60d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1049,8 +1049,8 @@ static int perf_event_refresh(struct perf_event *event, int refresh) return 0; } -void __perf_event_sched_out(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx) +static void __perf_event_sched_out(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) { struct perf_event *event; -- cgit v1.2.3-58-ga151 From 5b0311e1f2464547fc6f17a82d7ea2538c8c7a70 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 17 Jan 2010 11:59:13 +0100 Subject: perf: Allow pinned and flexible groups to be scheduled separately Tune the scheduling helpers so that we can choose to schedule either pinned and/or flexible groups from a context. And while at it, refactor a bit the naming of these helpers to make these more consistent and flexible. There is no (intended) change in scheduling behaviour in this patch. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 137 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 93 insertions(+), 44 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c4e90b8cd60d..bfc4ee015c87 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1049,8 +1049,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh) return 0; } -static void __perf_event_sched_out(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx) +enum event_type_t { + EVENT_FLEXIBLE = 0x1, + EVENT_PINNED = 0x2, + EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, +}; + +static void ctx_sched_out(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + enum event_type_t event_type) { struct perf_event *event; @@ -1061,13 +1068,18 @@ static void __perf_event_sched_out(struct perf_event_context *ctx, update_context_time(ctx); perf_disable(); - if (ctx->nr_active) { + if (!ctx->nr_active) + goto out_enable; + + if (event_type & EVENT_PINNED) list_for_each_entry(event, &ctx->pinned_groups, group_entry) group_sched_out(event, cpuctx, ctx); + if (event_type & EVENT_FLEXIBLE) list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); - } + + out_enable: perf_enable(); out: raw_spin_unlock(&ctx->lock); @@ -1229,15 +1241,13 @@ void perf_event_task_sched_out(struct task_struct *task, rcu_read_unlock(); if (do_switch) { - __perf_event_sched_out(ctx, cpuctx); + ctx_sched_out(ctx, cpuctx, EVENT_ALL); cpuctx->task_ctx = NULL; } } -/* - * Called with IRQs disabled - */ -static void __perf_event_task_sched_out(struct perf_event_context *ctx) +static void task_ctx_sched_out(struct perf_event_context *ctx, + enum event_type_t event_type) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); @@ -1247,39 +1257,34 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx) if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) return; - __perf_event_sched_out(ctx, cpuctx); + ctx_sched_out(ctx, cpuctx, event_type); cpuctx->task_ctx = NULL; } /* * Called with IRQs disabled */ -static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) +static void __perf_event_task_sched_out(struct perf_event_context *ctx) { - 
__perf_event_sched_out(&cpuctx->ctx, cpuctx); + task_ctx_sched_out(ctx, EVENT_ALL); +} + +/* + * Called with IRQs disabled + */ +static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, + enum event_type_t event_type) +{ + ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); } static void -__perf_event_sched_in(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx) +ctx_pinned_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + int cpu) { - int cpu = smp_processor_id(); struct perf_event *event; - int can_add_hw = 1; - - raw_spin_lock(&ctx->lock); - ctx->is_active = 1; - if (likely(!ctx->nr_events)) - goto out; - - ctx->timestamp = perf_clock(); - - perf_disable(); - /* - * First go through the list and put on any pinned groups - * in order to give them the best chance of going on. - */ list_for_each_entry(event, &ctx->pinned_groups, group_entry) { if (event->state <= PERF_EVENT_STATE_OFF) continue; @@ -1298,6 +1303,15 @@ __perf_event_sched_in(struct perf_event_context *ctx, event->state = PERF_EVENT_STATE_ERROR; } } +} + +static void +ctx_flexible_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + int cpu) +{ + struct perf_event *event; + int can_add_hw = 1; list_for_each_entry(event, &ctx->flexible_groups, group_entry) { /* Ignore events in OFF or ERROR state */ @@ -1314,11 +1328,53 @@ __perf_event_sched_in(struct perf_event_context *ctx, if (group_sched_in(event, cpuctx, ctx, cpu)) can_add_hw = 0; } +} + +static void +ctx_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + enum event_type_t event_type) +{ + int cpu = smp_processor_id(); + + raw_spin_lock(&ctx->lock); + ctx->is_active = 1; + if (likely(!ctx->nr_events)) + goto out; + + ctx->timestamp = perf_clock(); + + perf_disable(); + + /* + * First go through the list and put on any pinned groups + * in order to give them the best chance of going on. + */ + if (event_type & EVENT_PINNED) + ctx_pinned_sched_in(ctx, cpuctx, cpu); + + /* Then walk through the lower prio flexible groups */ + if (event_type & EVENT_FLEXIBLE) + ctx_flexible_sched_in(ctx, cpuctx, cpu); + perf_enable(); out: raw_spin_unlock(&ctx->lock); } +static void task_ctx_sched_in(struct task_struct *task, + enum event_type_t event_type) +{ + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event_context *ctx = task->perf_event_ctxp; + + if (likely(!ctx)) + return; + if (cpuctx->task_ctx == ctx) + return; + ctx_sched_in(ctx, cpuctx, event_type); + cpuctx->task_ctx = ctx; +} /* * Called from scheduler to add the events of the current task * with interrupts disabled. 
@@ -1332,22 +1388,15 @@ __perf_event_sched_in(struct perf_event_context *ctx, */ void perf_event_task_sched_in(struct task_struct *task) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); - struct perf_event_context *ctx = task->perf_event_ctxp; - - if (likely(!ctx)) - return; - if (cpuctx->task_ctx == ctx) - return; - __perf_event_sched_in(ctx, cpuctx); - cpuctx->task_ctx = ctx; + task_ctx_sched_in(task, EVENT_ALL); } -static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx) +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, + enum event_type_t event_type) { struct perf_event_context *ctx = &cpuctx->ctx; - __perf_event_sched_in(ctx, cpuctx); + ctx_sched_in(ctx, cpuctx, event_type); } #define MAX_INTERRUPTS (~0ULL) @@ -1476,17 +1525,17 @@ void perf_event_task_tick(struct task_struct *curr) if (ctx) perf_ctx_adjust_freq(ctx); - perf_event_cpu_sched_out(cpuctx); + cpu_ctx_sched_out(cpuctx, EVENT_ALL); if (ctx) - __perf_event_task_sched_out(ctx); + task_ctx_sched_out(ctx, EVENT_ALL); rotate_ctx(&cpuctx->ctx); if (ctx) rotate_ctx(ctx); - perf_event_cpu_sched_in(cpuctx); + cpu_ctx_sched_in(cpuctx, EVENT_ALL); if (ctx) - perf_event_task_sched_in(curr); + task_ctx_sched_in(curr, EVENT_ALL); } static int event_enable_on_exec(struct perf_event *event, -- cgit v1.2.3-58-ga151 From 7defb0f879bbcfe29e3c6f29d685d4f29b7a0700 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 17 Jan 2010 12:15:31 +0100 Subject: perf: Don't schedule out/in pinned events on task tick We don't need to schedule in/out pinned events on task tick, now that pinned and flexible groups can be scheduled separately. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index bfc4ee015c87..a90ae694cbc1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1525,17 +1525,17 @@ void perf_event_task_tick(struct task_struct *curr) if (ctx) perf_ctx_adjust_freq(ctx); - cpu_ctx_sched_out(cpuctx, EVENT_ALL); + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) - task_ctx_sched_out(ctx, EVENT_ALL); + task_ctx_sched_out(ctx, EVENT_FLEXIBLE); rotate_ctx(&cpuctx->ctx); if (ctx) rotate_ctx(ctx); - cpu_ctx_sched_in(cpuctx, EVENT_ALL); + cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) - task_ctx_sched_in(curr, EVENT_ALL); + task_ctx_sched_in(curr, EVENT_FLEXIBLE); } static int event_enable_on_exec(struct perf_event *event, -- cgit v1.2.3-58-ga151 From 329c0e012b99fa2325a0be205c052e4aba690f16 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 17 Jan 2010 12:56:05 +0100 Subject: perf: Better order flexible and pinned scheduling When a task gets scheduled in. We don't touch the cpu bound events so the priority order becomes: cpu pinned, cpu flexible, task pinned, task flexible. So schedule out cpu flexibles when a new task context gets in and correctly order the groups to schedule in: task pinned, cpu flexible, task flexible. Cpu pinned groups don't need to be touched at this time. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo --- kernel/perf_event.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index a90ae694cbc1..edc46b92b508 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx, raw_spin_unlock(&ctx->lock); } +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, + enum event_type_t event_type) +{ + struct perf_event_context *ctx = &cpuctx->ctx; + + ctx_sched_in(ctx, cpuctx, event_type); +} + static void task_ctx_sched_in(struct task_struct *task, enum event_type_t event_type) { @@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task, */ void perf_event_task_sched_in(struct task_struct *task) { - task_ctx_sched_in(task, EVENT_ALL); -} + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_event_context *ctx = task->perf_event_ctxp; -static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, - enum event_type_t event_type) -{ - struct perf_event_context *ctx = &cpuctx->ctx; + if (likely(!ctx)) + return; - ctx_sched_in(ctx, cpuctx, event_type); + if (cpuctx->task_ctx == ctx) + return; + + /* + * We want to keep the following priority order: + * cpu pinned (that don't need to move), task pinned, + * cpu flexible, task flexible. + */ + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); + + ctx_sched_in(ctx, cpuctx, EVENT_PINNED); + cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); + ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE); + + cpuctx->task_ctx = ctx; } #define MAX_INTERRUPTS (~0ULL) -- cgit v1.2.3-58-ga151 From 580d9e00fdfb85e65c5097dcd739c6efcdbadc96 Mon Sep 17 00:00:00 2001 From: Motohiro KOSAKI Date: Mon, 18 Jan 2010 21:35:05 -0500 Subject: kprobetrace, doc: Shell needs single quote to use $ character Shell interprets $val as shell variable, thus we need quote if we use the echo command. Signed-off-by: KOSAKI Motohiro Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE LKML-Reference: <20100119023505.31880.17367.stgit@localhost6.localdomain6> Signed-off-by: Ingo Molnar --- Documentation/trace/kprobetrace.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index f30978e001f8..ab57f02e53bb 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -79,7 +79,7 @@ Usage examples To add a probe as a new event, write a new definition to kprobe_events as below. - echo p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack) > /sys/kernel/debug/tracing/kprobe_events + echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > /sys/kernel/debug/tracing/kprobe_events This sets a kprobe on the top of do_sys_open() function with recording 1st to 4th arguments as "myprobe" event. Note, which register/stack entry is @@ -88,7 +88,7 @@ the ABI, please try to use probe subcommand of perf-tools (you can find it under tools/perf/). As this example shows, users can choose more familiar names for each arguments. - echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events + echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events This sets a kretprobe on the return point of do_sys_open() function with recording return value as "myretprobe" event. 
-- cgit v1.2.3-58-ga151 From df3ab708b787a2b35de5101452bd51d4a8ae0ded Mon Sep 17 00:00:00 2001 From: Motohiro KOSAKI Date: Mon, 18 Jan 2010 21:35:12 -0500 Subject: kprobetrace, doc: Add the explanation to remove probe points Latest kprobetrace can remove probe points selectively, thus the documentation should be updated too. Signed-off-by: KOSAKI Motohiro Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE LKML-Reference: <20100119023512.31880.35535.stgit@localhost6.localdomain6> Signed-off-by: Ingo Molnar --- Documentation/trace/kprobetrace.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index ab57f02e53bb..a9100b28eb84 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -24,6 +24,7 @@ Synopsis of kprobe_events ------------------------- p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe + -:[GRP/]EVENT : Clear a probe GRP : Group name. If omitted, use "kprobes" for it. EVENT : Event name. If omitted, the event name is generated @@ -122,6 +123,12 @@ REC->dfd, REC->filename, REC->flags, REC->mode This clears all probe points. + Or, + + echo -:myprobe >> kprobe_events + + This clears probe points selectively. + Right after definition, each event is disabled by default. For tracing these events, you need to enable it. -- cgit v1.2.3-58-ga151 From e8d433f335d44028d41af231ef5c52fd8a9b280b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 Jan 2010 15:59:19 -0200 Subject: perf archive: Add documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This also makes it appear on the 'perf --help' output, i.e. util/generate-cmdlist.sh now takes it into account. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263837559-24168-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-archive.txt | 22 ++++++++++++++++++++++ tools/perf/command-list.txt | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 tools/perf/Documentation/perf-archive.txt diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt new file mode 100644 index 000000000000..fae174dc7d01 --- /dev/null +++ b/tools/perf/Documentation/perf-archive.txt @@ -0,0 +1,22 @@ +perf-archive(1) +=============== + +NAME +---- +perf-archive - Create archive with object files with build-ids found in perf.data file + +SYNOPSIS +-------- +[verse] +'perf archive' [file] + +DESCRIPTION +----------- +This command runs runs perf-buildid-list --with-hits, and collects the files +with the buildids found so that analisys of perf.data contents can be possible +on another machine. 
+ + +SEE ALSO +-------- +linkperf:perf-record[1], linkperf:perf-buildid-list[1], linkperf:perf-report[1] diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index f73d1d90f5bd..cf6444dfd73a 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -3,7 +3,7 @@ # command name category [deprecated] [common] # perf-annotate mainporcelain common -perf-archive mainporcelain +perf-archive mainporcelain common perf-bench mainporcelain common perf-buildid-list mainporcelain common perf-diff mainporcelain common -- cgit v1.2.3-58-ga151 From d5526d8cb8e5aa3349c1ff4e409ad9b4cdac380c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 18 Jan 2010 18:21:42 -0200 Subject: perf archive: Fix installation steps in the Makefile MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warning: acme@parisc:~/git/linux-2.6-tip$ make -C tools/perf/ install make: Entering directory `/home/acme/git/linux-2.6-tip/tools/perf' Makefile:833: warning: overriding commands for target `perf-archive' Makefile:822: warning: ignoring old commands for target `perf-archive' Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263846102-24841-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d739552036d0..ddbeeee9ade2 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -311,7 +311,6 @@ PROGRAMS += perf # List built-in command $C whose implementation cmd_$C() is not in # builtin-$C.o but is linked in as part of some other command. # -BUILT_INS += perf-archive # what 'all' will build and 'install' will install, in perfexecdir ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) @@ -1004,6 +1003,7 @@ install: all $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' + $(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' -- cgit v1.2.3-58-ga151 From f162f87ad6e98e8bfb2362955da46bed7b2514be Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 19 Jan 2010 10:36:13 -0200 Subject: perf symbols: Set dso->kernel when handling the fake vmlinux MMAP event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it may be possible that there was no buildid section, where we would set this to 1. Found while analysing a perf.data file collected on an ARM machine where an explicitely specified vmlinux was being disregarded. 
Reported-by: Jamie Iles Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263904574-30732-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index dc13cad828d7..bbaee61c1683 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -373,6 +373,7 @@ int event__process_mmap(event_t *self, struct perf_session *session) if (kernel == NULL) goto out_problem; + kernel->kernel = 1; if (__map_groups__create_kernel_maps(&session->kmaps, session->vmlinux_maps, kernel) < 0) -- cgit v1.2.3-58-ga151 From dc8d6ab2b61a2d92b5d7438565ccd20b29724cb2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 19 Jan 2010 10:36:14 -0200 Subject: perf symbols: Use only --vmlinux if specified MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Found while analysing a perf.data file collected on an ARM machine where an explicitely specified vmlinux was being disregarded. Reported-by: Jamie Iles Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1263904574-30732-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 67 +++++++++++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a4e745934584..b6ab23dd5f9f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1572,7 +1572,7 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return -1; dso__set_loaded(self, map->type); - err = dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0); + err = dso__load_sym(self, map, session, vmlinux, fd, filter, 1, 0); close(fd); return err; @@ -1584,6 +1584,26 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, int err; const char *kallsyms_filename = NULL; char *kallsyms_allocated_filename = NULL; + /* + * Step 1: if the user specified a vmlinux filename, use it and only + * it, reporting errors to the user if it cannot be used. + * + * For instance, try to analyse an ARM perf.data file _without_ a + * build-id, or if the user specifies the wrong path to the right + * vmlinux file, obviously we can't fallback to another vmlinux (a + * x86_86 one, on the machine where analysis is being performed, say), + * or worse, /proc/kallsyms. + * + * If the specified file _has_ a build-id and there is a build-id + * section in the perf.data file, we will still do the expected + * validation in dso__load_vmlinux and will bail out if they don't + * match. + */ + if (symbol_conf.vmlinux_name != NULL) { + err = dso__load_vmlinux(self, map, session, + symbol_conf.vmlinux_name, filter); + goto out_try_fixup; + } if (vmlinux_path != NULL) { int i; @@ -1618,46 +1638,41 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, goto do_kallsyms; } } - + /* + * Now look if we have it on the build-id cache in + * $HOME/.debug/[kernel.kallsyms]. 
+ */ build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); if (asprintf(&kallsyms_allocated_filename, "%s/.debug/[kernel.kallsyms]/%s", - getenv("HOME"), sbuild_id) != -1) { - if (access(kallsyms_filename, F_OK)) { - kallsyms_filename = kallsyms_allocated_filename; - goto do_kallsyms; - } + getenv("HOME"), sbuild_id) == -1) + return -1; + + if (access(kallsyms_filename, F_OK)) { free(kallsyms_allocated_filename); - kallsyms_allocated_filename = NULL; + return -1; } - goto do_vmlinux; - } - - if (self->long_name[0] == '[') { + kallsyms_filename = kallsyms_allocated_filename; + } else { + /* + * Last resort, if we don't have a build-id and couldn't find + * any vmlinux file, try the running kernel kallsyms table. + */ kallsyms_filename = "/proc/kallsyms"; - goto do_kallsyms; } -do_vmlinux: - err = dso__load_vmlinux(self, map, session, self->long_name, filter); - if (err <= 0) { - if (self->has_build_id) - return -1; - - pr_info("The file %s cannot be used, " - "trying to use /proc/kallsyms...", self->long_name); do_kallsyms: - err = dso__load_kallsyms(self, kallsyms_filename, map, session, filter); - if (err > 0 && kallsyms_filename == NULL) - dso__set_long_name(self, strdup("[kernel.kallsyms]")); - free(kallsyms_allocated_filename); - } + err = dso__load_kallsyms(self, kallsyms_filename, map, session, filter); + free(kallsyms_allocated_filename); +out_try_fixup: if (err > 0) { out_fixup: + if (kallsyms_filename == NULL) + dso__set_long_name(self, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); } -- cgit v1.2.3-58-ga151 From ef12a141306c90336a3a10d40213ecd98624d274 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 20 Jan 2010 15:28:45 -0200 Subject: perf buildid-cache: Add new command to manage build-id cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For now it just has operations to examine a given file, find its build-id and add or remove it to/from the cache. Useful, for instance, when adding binaries sent together with a perf.data file, so that we can add them to the cache and have the tools find it when resolving symbols. It'll also manage the size of the cache like 'ccache' does. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264008525-29025-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-buildid-cache.txt | 33 ++++++ tools/perf/Makefile | 1 + tools/perf/builtin-buildid-cache.c | 133 ++++++++++++++++++++++++ tools/perf/builtin.h | 1 + tools/perf/command-list.txt | 1 + tools/perf/perf.c | 1 + tools/perf/util/header.c | 72 +++++++++++-- tools/perf/util/header.h | 5 + tools/perf/util/symbol.c | 4 +- tools/perf/util/symbol.h | 2 +- 10 files changed, 241 insertions(+), 12 deletions(-) create mode 100644 tools/perf/Documentation/perf-buildid-cache.txt create mode 100644 tools/perf/builtin-buildid-cache.c diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt new file mode 100644 index 000000000000..88bc3b519746 --- /dev/null +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -0,0 +1,33 @@ +perf-buildid-cache(1) +===================== + +NAME +---- +perf-buildid-cache - Manage build-id cache. + +SYNOPSIS +-------- +[verse] +'perf buildid-list ' + +DESCRIPTION +----------- +This command manages the build-id cache. It can add and remove files to the +cache. 
In the future it should as well purge older entries, set upper limits +for the space used by the cache, etc. + +OPTIONS +------- +-a:: +--add=:: + Add specified file to the cache. +-r:: +--remove=:: + Remove specified file to the cache. +-v:: +--verbose:: + Be more verbose. + +SEE ALSO +-------- +linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/Makefile b/tools/perf/Makefile index ddbeeee9ade2..9b173e66fb41 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -445,6 +445,7 @@ BUILTIN_OBJS += builtin-diff.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-sched.o BUILTIN_OBJS += builtin-buildid-list.o +BUILTIN_OBJS += builtin-buildid-cache.o BUILTIN_OBJS += builtin-list.o BUILTIN_OBJS += builtin-record.o BUILTIN_OBJS += builtin-report.o diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c new file mode 100644 index 000000000000..30a05f552c96 --- /dev/null +++ b/tools/perf/builtin-buildid-cache.c @@ -0,0 +1,133 @@ +/* + * builtin-buildid-cache.c + * + * Builtin buildid-cache command: Manages build-id cache + * + * Copyright (C) 2010, Red Hat Inc. + * Copyright (C) 2010, Arnaldo Carvalho de Melo + */ +#include "builtin.h" +#include "perf.h" +#include "util/cache.h" +#include "util/debug.h" +#include "util/header.h" +#include "util/parse-options.h" +#include "util/strlist.h" +#include "util/symbol.h" + +static char const *add_name_list_str, *remove_name_list_str; + +static const char * const buildid_cache_usage[] = { + "perf buildid-cache []", + NULL +}; + +static const struct option buildid_cache_options[] = { + OPT_STRING('a', "add", &add_name_list_str, + "file list", "file(s) to add"), + OPT_STRING('r', "remove", &remove_name_list_str, "file list", + "file(s) to remove"), + OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"), + OPT_END() +}; + +static int build_id_cache__add_file(const char *filename, const char *debugdir) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + u8 build_id[BUILD_ID_SIZE]; + int err; + + if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) { + pr_debug("Couldn't read a build-id in %s\n", filename); + return -1; + } + + build_id__sprintf(build_id, sizeof(build_id), sbuild_id); + err = build_id_cache__add_s(sbuild_id, debugdir, filename, false); + if (verbose) + pr_info("Adding %s %s: %s\n", sbuild_id, filename, + err ? "FAIL" : "Ok"); + return err; +} + +static int build_id_cache__remove_file(const char *filename __used, + const char *debugdir __used) +{ + u8 build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + int err; + + if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) { + pr_debug("Couldn't read a build-id in %s\n", filename); + return -1; + } + + build_id__sprintf(build_id, sizeof(build_id), sbuild_id); + err = build_id_cache__remove_s(sbuild_id, debugdir); + if (verbose) + pr_info("Removing %s %s: %s\n", sbuild_id, filename, + err ? 
"FAIL" : "Ok"); + + return err; +} + +static int __cmd_buildid_cache(void) +{ + struct strlist *list; + struct str_node *pos; + char debugdir[PATH_MAX]; + + snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"), + DEBUG_CACHE_DIR); + + if (add_name_list_str) { + list = strlist__new(true, add_name_list_str); + if (list) { + strlist__for_each(pos, list) + if (build_id_cache__add_file(pos->s, debugdir)) { + if (errno == EEXIST) { + pr_debug("%s already in the cache\n", + pos->s); + continue; + } + pr_warning("Couldn't add %s: %s\n", + pos->s, strerror(errno)); + } + + strlist__delete(list); + } + } + + if (remove_name_list_str) { + list = strlist__new(true, remove_name_list_str); + if (list) { + strlist__for_each(pos, list) + if (build_id_cache__remove_file(pos->s, debugdir)) { + if (errno == ENOENT) { + pr_debug("%s wasn't in the cache\n", + pos->s); + continue; + } + pr_warning("Couldn't remove %s: %s\n", + pos->s, strerror(errno)); + } + + strlist__delete(list); + } + } + + return 0; +} + +int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used) +{ + argc = parse_options(argc, argv, buildid_cache_options, + buildid_cache_usage, 0); + + if (symbol__init() < 0) + return -1; + + setup_pager(); + return __cmd_buildid_cache(); +} diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index 18035b1f16c7..dee97cfe3794 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd); extern int cmd_annotate(int argc, const char **argv, const char *prefix); extern int cmd_bench(int argc, const char **argv, const char *prefix); +extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix); extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); extern int cmd_diff(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index cf6444dfd73a..9afcff2e3ae5 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -5,6 +5,7 @@ perf-annotate mainporcelain common perf-archive mainporcelain common perf-bench mainporcelain common +perf-buildid-cache mainporcelain common perf-buildid-list mainporcelain common perf-diff mainporcelain common perf-list mainporcelain common diff --git a/tools/perf/perf.c b/tools/perf/perf.c index fc89005c3e51..05c861c045d5 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -285,6 +285,7 @@ static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; static struct cmd_struct commands[] = { + { "buildid-cache", cmd_buildid_cache, 0 }, { "buildid-list", cmd_buildid_list, 0 }, { "diff", cmd_diff, 0 }, { "help", cmd_help, 0 }, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 1b65fed0dd2d..2bb2bdb1f456 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -231,32 +231,29 @@ static int dsos__write_buildid_table(int fd) return err; } -static int dso__cache_build_id(struct dso *self, const char *debugdir) +int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, + const char *name, bool is_kallsyms) { const size_t size = PATH_MAX; char *filename = malloc(size), - *linkname = malloc(size), *targetname, *sbuild_id; + *linkname = malloc(size), *targetname; int len, err = -1; - bool is_kallsyms = self->kernel && self->long_name[0] != '/'; if (filename == NULL || linkname == NULL) goto out_free; len = snprintf(filename, 
size, "%s%s%s", - debugdir, is_kallsyms ? "/" : "", self->long_name); + debugdir, is_kallsyms ? "/" : "", name); if (mkdir_p(filename, 0755)) goto out_free; - len += snprintf(filename + len, sizeof(filename) - len, "/"); - sbuild_id = filename + len; - build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); + snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); if (access(filename, F_OK)) { if (is_kallsyms) { if (copyfile("/proc/kallsyms", filename)) goto out_free; - } else if (link(self->long_name, filename) && - copyfile(self->long_name, filename)) + } else if (link(name, filename) && copyfile(name, filename)) goto out_free; } @@ -278,6 +275,63 @@ out_free: return err; } +static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, + const char *name, const char *debugdir, + bool is_kallsyms) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(build_id, build_id_size, sbuild_id); + + return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); +} + +int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) +{ + const size_t size = PATH_MAX; + char *filename = malloc(size), + *linkname = malloc(size); + int err = -1; + + if (filename == NULL || linkname == NULL) + goto out_free; + + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, sbuild_id + 2); + + if (access(linkname, F_OK)) + goto out_free; + + if (readlink(linkname, filename, size) < 0) + goto out_free; + + if (unlink(linkname)) + goto out_free; + + /* + * Since the link is relative, we must make it absolute: + */ + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, filename); + + if (unlink(linkname)) + goto out_free; + + err = 0; +out_free: + free(filename); + free(linkname); + return err; +} + +static int dso__cache_build_id(struct dso *self, const char *debugdir) +{ + bool is_kallsyms = self->kernel && self->long_name[0] != '/'; + + return build_id_cache__add_b(self->build_id, sizeof(self->build_id), + self->long_name, debugdir, is_kallsyms); +} + static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) { struct dso *pos; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index ccc8540feccd..82a6af72d4cc 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -5,6 +5,7 @@ #include #include #include "types.h" +#include "event.h" #include @@ -84,4 +85,8 @@ int perf_header__process_sections(struct perf_header *self, int fd, struct perf_header *ph, int feat, int fd)); +int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, + const char *name, bool is_kallsyms); +int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); + #endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b6ab23dd5f9f..6f30fe18c265 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -345,10 +345,10 @@ void dso__sort_by_name(struct dso *self, enum map_type type) &self->symbols[type]); } -int build_id__sprintf(u8 *self, int len, char *bf) +int build_id__sprintf(const u8 *self, int len, char *bf) { char *bid = bf; - u8 *raw = self; + const u8 *raw = self; int i; for (i = 0; i < len; ++i) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 525085fd0735..ffe0b0f2e5d3 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -144,7 +144,7 @@ struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, int filename__read_build_id(const char 
*filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); -int build_id__sprintf(u8 *self, int len, char *bf); +int build_id__sprintf(const u8 *self, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, char type, u64 start)); -- cgit v1.2.3-58-ga151 From abd50713944c8ea9e0af5b7bffa0aacae21cc91a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 26 Jan 2010 18:50:16 +0100 Subject: perf: Reimplement frequency driven sampling There was a bug in the old period code that caused intel_pmu_enable_all() or native_write_msr_safe() to show up quite high in the profiles. In staring at that code it made my head hurt, so I rewrote it in a hopefully simpler fashion. Its now fully symetric between tick and overflow driven adjustments and uses less data to boot. The only complication is that it basically wants to do a u128 division. The code approximates that in a rather simple truncate until it fits fashion, taking care to balance the terms while truncating. This version does not generate that sampling artefact. Signed-off-by: Peter Zijlstra LKML-Reference: Cc: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 5 +- kernel/perf_event.c | 132 +++++++++++++++++++++++++++++++-------------- 2 files changed, 94 insertions(+), 43 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index c6f812e4d058..72b2615600d8 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -498,9 +498,8 @@ struct hw_perf_event { atomic64_t period_left; u64 interrupts; - u64 freq_count; - u64 freq_interrupts; - u64 freq_stamp; + u64 freq_time_stamp; + u64 freq_count_stamp; #endif }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index edc46b92b508..251fb9552492 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1423,14 +1423,83 @@ void perf_event_task_sched_in(struct task_struct *task) static void perf_log_throttle(struct perf_event *event, int enable); -static void perf_adjust_period(struct perf_event *event, u64 events) +static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) +{ + u64 frequency = event->attr.sample_freq; + u64 sec = NSEC_PER_SEC; + u64 divisor, dividend; + + int count_fls, nsec_fls, frequency_fls, sec_fls; + + count_fls = fls64(count); + nsec_fls = fls64(nsec); + frequency_fls = fls64(frequency); + sec_fls = 30; + + /* + * We got @count in @nsec, with a target of sample_freq HZ + * the target period becomes: + * + * @count * 10^9 + * period = ------------------- + * @nsec * sample_freq + * + */ + + /* + * Reduce accuracy by one bit such that @a and @b converge + * to a similar magnitude. + */ +#define REDUCE_FLS(a, b) \ +do { \ + if (a##_fls > b##_fls) { \ + a >>= 1; \ + a##_fls--; \ + } else { \ + b >>= 1; \ + b##_fls--; \ + } \ +} while (0) + + /* + * Reduce accuracy until either term fits in a u64, then proceed with + * the other, so that finally we can do a u64/u64 division. 
+ */ + while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { + REDUCE_FLS(nsec, frequency); + REDUCE_FLS(sec, count); + } + + if (count_fls + sec_fls > 64) { + divisor = nsec * frequency; + + while (count_fls + sec_fls > 64) { + REDUCE_FLS(count, sec); + divisor >>= 1; + } + + dividend = count * sec; + } else { + dividend = count * sec; + + while (nsec_fls + frequency_fls > 64) { + REDUCE_FLS(nsec, frequency); + dividend >>= 1; + } + + divisor = nsec * frequency; + } + + return div64_u64(dividend, divisor); +} + +static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) { struct hw_perf_event *hwc = &event->hw; u64 period, sample_period; s64 delta; - events *= hwc->sample_period; - period = div64_u64(events, event->attr.sample_freq); + period = perf_calculate_period(event, nsec, count); delta = (s64)(period - hwc->sample_period); delta = (delta + 7) / 8; /* low pass filter */ @@ -1441,13 +1510,22 @@ static void perf_adjust_period(struct perf_event *event, u64 events) sample_period = 1; hwc->sample_period = sample_period; + + if (atomic64_read(&hwc->period_left) > 8*sample_period) { + perf_disable(); + event->pmu->disable(event); + atomic64_set(&hwc->period_left, 0); + event->pmu->enable(event); + perf_enable(); + } } static void perf_ctx_adjust_freq(struct perf_event_context *ctx) { struct perf_event *event; struct hw_perf_event *hwc; - u64 interrupts, freq; + u64 interrupts, now; + s64 delta; raw_spin_lock(&ctx->lock); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { @@ -1468,44 +1546,18 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); event->pmu->unthrottle(event); - interrupts = 2*sysctl_perf_event_sample_rate/HZ; } if (!event->attr.freq || !event->attr.sample_freq) continue; - /* - * if the specified freq < HZ then we need to skip ticks - */ - if (event->attr.sample_freq < HZ) { - freq = event->attr.sample_freq; - - hwc->freq_count += freq; - hwc->freq_interrupts += interrupts; - - if (hwc->freq_count < HZ) - continue; - - interrupts = hwc->freq_interrupts; - hwc->freq_interrupts = 0; - hwc->freq_count -= HZ; - } else - freq = HZ; - - perf_adjust_period(event, freq * interrupts); + event->pmu->read(event); + now = atomic64_read(&event->count); + delta = now - hwc->freq_count_stamp; + hwc->freq_count_stamp = now; - /* - * In order to avoid being stalled by an (accidental) huge - * sample period, force reset the sample period if we didn't - * get any events in this freq period. 
- */ - if (!interrupts) { - perf_disable(); - event->pmu->disable(event); - atomic64_set(&hwc->period_left, 0); - event->pmu->enable(event); - perf_enable(); - } + if (delta > 0) + perf_adjust_period(event, TICK_NSEC, delta); } raw_spin_unlock(&ctx->lock); } @@ -3768,12 +3820,12 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, if (event->attr.freq) { u64 now = perf_clock(); - s64 delta = now - hwc->freq_stamp; + s64 delta = now - hwc->freq_time_stamp; - hwc->freq_stamp = now; + hwc->freq_time_stamp = now; - if (delta > 0 && delta < TICK_NSEC) - perf_adjust_period(event, NSEC_PER_SEC / (int)delta); + if (delta > 0 && delta < 2*TICK_NSEC) + perf_adjust_period(event, delta, hwc->last_period); } /* -- cgit v1.2.3-58-ga151 From 24bfef0f924b4ac4312614422a4982b5f4d9a4c7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 21 Jan 2010 13:04:43 -0200 Subject: perf top: Fix sample counting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Broken since "5b2bb75 perf top: Support userspace symbols too". Reported-by: Mike Galbraith Tested-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264086284-1431-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7a8a77ec2c9d..8b049888a9dd 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -934,8 +934,11 @@ static void event__process_sample(const event_t *self, struct addr_location al; u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + ++samples; + switch (origin) { case PERF_RECORD_MISC_USER: + ++userspace_samples; if (hide_user_symbols) return; break; @@ -960,9 +963,6 @@ static void event__process_sample(const event_t *self, if (list_empty(&syme->node) || !syme->node.next) __list_insert_active_sym(syme); pthread_mutex_unlock(&active_symbols_lock); - if (origin == PERF_RECORD_MISC_USER) - ++userspace_samples; - ++samples; } } -- cgit v1.2.3-58-ga151 From 0f35cd4cea08a8893e3e2ea03cbdb65f5d2b0e7a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 21 Jan 2010 13:04:44 -0200 Subject: perf top: Handle PERF_RECORD_{FORK,EXIT} events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As noticed by Mike, symbols in new tasks were not being processed as we weren't processing these events. 
Reported-by: Mike Galbraith Tested-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264086284-1431-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 8b049888a9dd..2227b84aa002 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -975,6 +975,10 @@ static int event__process(event_t *event, struct perf_session *session) case PERF_RECORD_MMAP: event__process_mmap(event, session); break; + case PERF_RECORD_FORK: + case PERF_RECORD_EXIT: + event__process_task(event, session); + break; default: break; } -- cgit v1.2.3-58-ga151 From e1c7c6a40c8037478742ce134190c1a955853bfb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 22 Jan 2010 14:35:01 -0200 Subject: perf symbols: Fix inverted logic for showing kallsyms as the source of symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only if we parsed /proc/kallsyms (or a copy found in the buildid cache) we should set the dso long name to "[kernel.kallsyms]". Reported-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264178102-4203-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 6f30fe18c265..1270cf867e61 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1671,7 +1671,7 @@ do_kallsyms: out_try_fixup: if (err > 0) { out_fixup: - if (kallsyms_filename == NULL) + if (kallsyms_filename != NULL) dso__set_long_name(self, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); -- cgit v1.2.3-58-ga151 From 19fc2dedff448120a7aeaa3c136689c6b71777c6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 22 Jan 2010 14:35:02 -0200 Subject: perf symbols: Use the right variable to check for kallsyms in the cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Probably this wasn't noticed when testing this on my parisc machine because I must have copied manually to its cache the vmlinux file used in the x86_64 machine, now that I tried looking on a x86-32 machine with a fresh cache, kernel symbols weren't being resolved even with the right kallsyms copy on its cache, duh. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264178102-4203-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1270cf867e61..f1f609dcf9a1 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1650,12 +1650,12 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, getenv("HOME"), sbuild_id) == -1) return -1; + kallsyms_filename = kallsyms_allocated_filename; + if (access(kallsyms_filename, F_OK)) { free(kallsyms_allocated_filename); return -1; } - - kallsyms_filename = kallsyms_allocated_filename; } else { /* * Last resort, if we don't have a build-id and couldn't find -- cgit v1.2.3-58-ga151 From 408f0d18ba6b9bb447f807f621b2c9663c5cf638 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Fri, 22 Jan 2010 22:45:29 +0900 Subject: perf trace: Add -i option for choosing input file perf trace lacks -i option for choosing input file. This patch adds it to perf trace. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1264167929-6741-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 8e9cbfe608d6..0b65779e3c10 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -515,6 +515,8 @@ static const struct option options[] = { parse_scriptname), OPT_STRING('g', "gen-script", &generate_script_lang, "lang", "generate perf-trace.xx script in specified language"), + OPT_STRING('i', "input", &input_name, "file", + "input file name"), OPT_END() }; -- cgit v1.2.3-58-ga151 From 339ce1a4dc2ca26444c4f65c31b71a5056f3bb0b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 18 Jan 2010 16:47:07 +1100 Subject: perf: Fix inconsistency between IP and callchain sampling When running perf across all cpus with backtracing (-a -g), sometimes we get samples without associated backtraces: 23.44% init [kernel] [k] restore 11.46% init eeba0c [k] 0x00000000eeba0c 6.77% swapper [kernel] [k] .perf_ctx_adjust_freq 5.73% init [kernel] [k] .__trace_hcall_entry 4.69% perf libc-2.9.so [.] 0x0000000006bb8c | |--11.11%-- 0xfffa941bbbc It turns out the backtrace code has a check for the idle task and the IP sampling does not. This creates problems when profiling an interrupt heavy workload (in my case 10Gbit ethernet) since we get no backtraces for interrupts received while idle (ie most of the workload). Right now x86 and sh check that current is not NULL, which should never happen so remove that too. Idle task's exclusion must be performed from the core code, on top of perf_event_attr:exclude_idle. 
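For tools, the core-code knob already exists: a sketch of requesting idle exclusion through perf_event_attr.exclude_idle when opening a sampling event, rather than relying on arch callchain code to drop idle hits. The event configuration and error handling here are illustrative, not taken from any particular tool:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_cycles_counter(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.exclude_idle = 1;		/* ask the core, not the arch backtracer */

	/* one CPU, all tasks: pid == -1, cpu >= 0 */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}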
Signed-off-by: Anton Blanchard Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Benjamin Herrenschmidt Cc: Paul Mundt LKML-Reference: <20100118054707.GT12666@kryten> Signed-off-by: Frederic Weisbecker --- arch/powerpc/kernel/perf_callchain.c | 3 --- arch/sh/kernel/perf_callchain.c | 3 --- arch/x86/kernel/cpu/perf_event.c | 3 --- 3 files changed, 9 deletions(-) diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index a3c11cac3d71..95ad9dad298e 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c @@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) entry->nr = 0; - if (current->pid == 0) /* idle task? */ - return entry; - if (!user_mode(regs)) { perf_callchain_kernel(regs, entry); if (current->mm) diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c index 24ea837eac5b..a9dd3abde28e 100644 --- a/arch/sh/kernel/perf_callchain.c +++ b/arch/sh/kernel/perf_callchain.c @@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) is_user = user_mode(regs); - if (!current || current->pid == 0) - return; - if (is_user && current->state != TASK_RUNNING) return; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b1bb8c550526..ed1998b28a7c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2425,9 +2425,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) is_user = user_mode(regs); - if (!current || current->pid == 0) - return; - if (is_user && current->state != TASK_RUNNING) return; -- cgit v1.2.3-58-ga151 From 430ad5a600a83956749307b13257c464c3826b55 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 28 Jan 2010 09:32:29 +0800 Subject: perf: Factorize trace events raw sample buffer operations Introduce ftrace_perf_buf_prepare() and ftrace_perf_buf_submit() to gather the common code that operates on raw events sampling buffer. This cleans up redundant code between regular trace events, syscall events and kprobe events. 
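The calling convention the two helpers establish: compute a u64-aligned record size, prepare (which takes care of irqs, recursion, the per-cpu buffer and the trace_entry header), fill in the event-specific fields, then submit. A minimal sketch of a caller; my_entry, my_event_id and my_profile_hook are illustrative stand-ins for the real users shown in the patch below:

#include <linux/kernel.h>		/* ALIGN() */
#include <linux/ftrace_event.h>		/* ftrace_perf_buf_prepare()/_submit() */

struct my_entry {
	struct trace_entry	ent;	/* header filled by ftrace_perf_buf_prepare() */
	unsigned long		ip;
	unsigned long		arg;
};

static void my_profile_hook(int my_event_id, unsigned long ip, unsigned long arg)
{
	struct my_entry *entry;
	unsigned long irq_flags;
	int rctx, size;

	/* round up so the trailing u64 can be zeroed without leaking stack */
	size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	entry = ftrace_perf_buf_prepare(size, my_event_id, &rctx, &irq_flags);
	if (!entry)
		return;			/* recursion, or no profile buffer */

	entry->ip  = ip;
	entry->arg = arg;

	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
}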
Changelog v1->v2: - Rename function name as per Masami and Frederic's suggestion - Add __kprobes for ftrace_perf_buf_prepare() and make ftrace_perf_buf_submit() inline as per Masami's suggestion - Export ftrace_perf_buf_prepare since modules will use it Signed-off-by: Xiao Guangrong Acked-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Steven Rostedt Cc: Paul Mackerras Cc: Jason Baron Cc: Peter Zijlstra LKML-Reference: <4B60E92D.9000808@cn.fujitsu.com> Signed-off-by: Frederic Weisbecker --- include/linux/ftrace_event.h | 18 ++++++-- include/trace/ftrace.h | 48 +++------------------ kernel/trace/trace_event_profile.c | 52 ++++++++++++++++++++--- kernel/trace/trace_kprobe.c | 86 +++++--------------------------------- kernel/trace/trace_syscalls.c | 71 +++++-------------------------- 5 files changed, 88 insertions(+), 187 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 0a09e758c7d3..cd95919d9ff3 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -5,6 +5,7 @@ #include #include #include +#include struct trace_array; struct tracer; @@ -138,9 +139,6 @@ struct ftrace_event_call { #define FTRACE_MAX_PROFILE_SIZE 2048 -extern char *perf_trace_buf; -extern char *perf_trace_buf_nmi; - #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ @@ -195,6 +193,20 @@ extern void ftrace_profile_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); +extern void * +ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, + unsigned long *irq_flags); + +static inline void +ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, + u64 count, unsigned long irq_flags) +{ + struct trace_entry *entry = raw_data; + + perf_tp_event(entry->type, addr, count, raw_data, size); + perf_swevent_put_recursion_context(rctx); + local_irq_restore(irq_flags); +} #endif #endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 4a46a60c2077..f2c09e4d656c 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -850,22 +850,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ - extern int perf_swevent_get_recursion_context(void); \ - extern void perf_swevent_put_recursion_context(int rctx); \ - extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ - struct trace_entry *ent; \ int __entry_size; \ int __data_size; \ - char *trace_buf; \ - char *raw_data; \ - int __cpu; \ int rctx; \ - int pc; \ - \ - pc = preempt_count(); \ \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ @@ -875,42 +865,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ "profile buffer not large enough")) \ return; \ - \ - local_irq_save(irq_flags); \ - \ - rctx = perf_swevent_get_recursion_context(); \ - if (rctx < 0) \ - goto end_recursion; \ - \ - __cpu = smp_processor_id(); \ - \ - if (in_nmi()) \ - trace_buf = rcu_dereference(perf_trace_buf_nmi); \ - else \ - trace_buf = rcu_dereference(perf_trace_buf); \ - \ - if (!trace_buf) \ - goto end; \ - \ - raw_data = per_cpu_ptr(trace_buf, __cpu); \ - \ - 
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ - entry = (struct ftrace_raw_##call *)raw_data; \ - ent = &entry->ent; \ - tracing_generic_entry_update(ent, irq_flags, pc); \ - ent->type = event_call->id; \ - \ + entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ + __entry_size, event_call->id, &rctx, &irq_flags); \ + if (!entry) \ + return; \ tstruct \ \ { assign; } \ \ - perf_tp_event(event_call->id, __addr, __count, entry, \ - __entry_size); \ - \ -end: \ - perf_swevent_put_recursion_context(rctx); \ -end_recursion: \ - local_irq_restore(irq_flags); \ + ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ + __count, irq_flags); \ } #undef DEFINE_EVENT diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 9e25573242cf..f0d693005075 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -6,14 +6,12 @@ */ #include +#include #include "trace.h" -char *perf_trace_buf; -EXPORT_SYMBOL_GPL(perf_trace_buf); - -char *perf_trace_buf_nmi; -EXPORT_SYMBOL_GPL(perf_trace_buf_nmi); +static char *perf_trace_buf; +static char *perf_trace_buf_nmi; typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; @@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id) } mutex_unlock(&event_mutex); } + +__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, + int *rctxp, unsigned long *irq_flags) +{ + struct trace_entry *entry; + char *trace_buf, *raw_data; + int pc, cpu; + + pc = preempt_count(); + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(*irq_flags); + + *rctxp = perf_swevent_get_recursion_context(); + if (*rctxp < 0) + goto err_recursion; + + cpu = smp_processor_id(); + + if (in_nmi()) + trace_buf = rcu_dereference(perf_trace_buf_nmi); + else + trace_buf = rcu_dereference(perf_trace_buf); + + if (!trace_buf) + goto err; + + raw_data = per_cpu_ptr(trace_buf, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + entry = (struct trace_entry *)raw_data; + tracing_generic_entry_update(entry, *irq_flags, pc); + entry->type = type; + + return raw_data; +err: + perf_swevent_put_recursion_context(*rctxp); +err_recursion: + local_irq_restore(*irq_flags); + return NULL; +} +EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d6266cad6953..2e28ee36646f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1243,14 +1243,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry *entry; - struct trace_entry *ent; - int size, __size, i, pc, __cpu; + int size, __size, i; unsigned long irq_flags; - char *trace_buf; - char *raw_data; int rctx; - pc = preempt_count(); __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); @@ -1258,45 +1254,16 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, "profile buffer not large enough")) return 0; - /* - * Protect the non nmi buffer - * This also protects the rcu read side - */ - local_irq_save(irq_flags); - - rctx = perf_swevent_get_recursion_context(); - if (rctx < 0) - goto end_recursion; - - __cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - 
- if (!trace_buf) - goto end; - - raw_data = per_cpu_ptr(trace_buf, __cpu); - - /* Zero dead bytes from alignment to avoid buffer leak to userspace */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - entry = (struct kprobe_trace_entry *)raw_data; - ent = &entry->ent; + entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + if (!entry) + return 0; - tracing_generic_entry_update(ent, irq_flags, pc); - ent->type = call->id; entry->nargs = tp->nr_args; entry->ip = (unsigned long)kp->addr; for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - perf_tp_event(call->id, entry->ip, 1, entry, size); -end: - perf_swevent_put_recursion_context(rctx); -end_recursion: - local_irq_restore(irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); return 0; } @@ -1308,14 +1275,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry *entry; - struct trace_entry *ent; - int size, __size, i, pc, __cpu; + int size, __size, i; unsigned long irq_flags; - char *trace_buf; - char *raw_data; int rctx; - pc = preempt_count(); __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); @@ -1323,46 +1286,17 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, "profile buffer not large enough")) return 0; - /* - * Protect the non nmi buffer - * This also protects the rcu read side - */ - local_irq_save(irq_flags); - - rctx = perf_swevent_get_recursion_context(); - if (rctx < 0) - goto end_recursion; - - __cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto end; - - raw_data = per_cpu_ptr(trace_buf, __cpu); - - /* Zero dead bytes from alignment to avoid buffer leak to userspace */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - entry = (struct kretprobe_trace_entry *)raw_data; - ent = &entry->ent; + entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + if (!entry) + return 0; - tracing_generic_entry_update(ent, irq_flags, pc); - ent->type = call->id; entry->nargs = tp->nr_args; entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - perf_tp_event(call->id, entry->ret_ip, 1, entry, size); -end: - perf_swevent_put_recursion_context(rctx); -end_recursion: - local_irq_restore(irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); return 0; } diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index f694f66d75b0..4e332b9e449c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -433,12 +433,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; unsigned long flags; - char *trace_buf; - char *raw_data; int syscall_nr; int rctx; int size; - int cpu; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) @@ -457,37 +454,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) "profile buffer not large enough")) return; - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(flags); - - rctx = 
perf_swevent_get_recursion_context(); - if (rctx < 0) - goto end_recursion; - - cpu = smp_processor_id(); - - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto end; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, + sys_data->enter_event->id, &rctx, &flags); + if (!rec) + return; - rec = (struct syscall_trace_enter *) raw_data; - tracing_generic_entry_update(&rec->ent, 0, 0); - rec->ent.type = sys_data->enter_event->id; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size); - -end: - perf_swevent_put_recursion_context(rctx); -end_recursion: - local_irq_restore(flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); } int prof_sysenter_enable(struct ftrace_event_call *call) @@ -531,11 +506,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) struct syscall_trace_exit *rec; unsigned long flags; int syscall_nr; - char *trace_buf; - char *raw_data; int rctx; int size; - int cpu; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) @@ -557,38 +529,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) "exit event has grown above profile buffer size")) return; - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(flags); - - rctx = perf_swevent_get_recursion_context(); - if (rctx < 0) - goto end_recursion; - - cpu = smp_processor_id(); - - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto end; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - - rec = (struct syscall_trace_exit *)raw_data; + rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, + sys_data->exit_event->id, &rctx, &flags); + if (!rec) + return; - tracing_generic_entry_update(&rec->ent, 0, 0); - rec->ent.type = sys_data->exit_event->id; rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size); - -end: - perf_swevent_put_recursion_context(rctx); -end_recursion: - local_irq_restore(flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); } int prof_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3-58-ga151 From 1e12a4a7a3a78bc9c3aaf3486dde3b8ab1cdf465 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 28 Jan 2010 09:34:27 +0800 Subject: tracing/kprobe: Cleanup unused return value of tracing functions The return values of the kprobe's tracing functions are meaningless, lets remove these. 
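A sketch of the resulting shape: the kprobes API still expects an int-returning pre-handler, so a thin dispatcher keeps returning 0 while the tracing and profiling helpers become void. This is simplified from trace_kprobe.c; the enable checks are only hinted at in comments:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

static void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	/* ... reserve a ring-buffer event, fill it, commit it ... */
}

static void kprobe_profile_func(struct kprobe *kp, struct pt_regs *regs)
{
	/* ... ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() ... */
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	kprobe_trace_func(kp, regs);	/* if the trace event is enabled */
	kprobe_profile_func(kp, regs);	/* if profiling is enabled */
	return 0;			/* the only value kprobes acts on */
}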
Signed-off-by: Xiao Guangrong Acked-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Ingo Molnar Cc: Paul Mackerras Cc: Jason Baron Cc: Peter Zijlstra LKML-Reference: <4B60E9A3.2040505@cn.fujitsu.com> [fweisbec@gmail: whitespace fixes, drop useless void returns in end of functions] Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 2e28ee36646f..6178abf3637e 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -942,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = { }; /* Kprobe handler */ -static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) +static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct kprobe_trace_entry *entry; @@ -962,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) event = trace_current_buffer_lock_reserve(&buffer, call->id, size, irq_flags, pc); if (!event) - return 0; + return; entry = ring_buffer_event_data(event); entry->nargs = tp->nr_args; @@ -972,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); - return 0; } /* Kretprobe handler */ -static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); @@ -995,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, event = trace_current_buffer_lock_reserve(&buffer, call->id, size, irq_flags, pc); if (!event) - return 0; + return; entry = ring_buffer_event_data(event); entry->nargs = tp->nr_args; @@ -1006,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); - - return 0; } /* Event entry printers */ @@ -1237,7 +1234,7 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes int kprobe_profile_func(struct kprobe *kp, +static __kprobes void kprobe_profile_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); @@ -1252,11 +1249,11 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, size -= sizeof(u32); if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, "profile buffer not large enough")) - return 0; + return; entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) - return 0; + return; entry->nargs = tp->nr_args; entry->ip = (unsigned long)kp->addr; @@ -1264,12 +1261,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, entry->args[i] = call_fetch(&tp->args[i].fetch, regs); ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); - - return 0; } /* Kretprobe profile handler */ -static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct 
trace_probe, rp); @@ -1284,11 +1279,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, size -= sizeof(u32); if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, "profile buffer not large enough")) - return 0; + return; entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) - return 0; + return; entry->nargs = tp->nr_args; entry->func = (unsigned long)tp->rp.kp.addr; @@ -1297,8 +1292,6 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, entry->args[i] = call_fetch(&tp->args[i].fetch, regs); ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); - - return 0; } static int probe_profile_enable(struct ftrace_event_call *call) -- cgit v1.2.3-58-ga151 From 40f9249a73f6c251adea492b1c3d19d39e2a9bda Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Thu, 28 Jan 2010 16:44:01 +0530 Subject: x86/debug: Clear reserved bits of DR6 in do_debug() Clear the reserved bits from the stored copy of debug status register (DR6). This will help easy bitwise operations such as quick testing of a debug event origin. Signed-off-by: K.Prasad Cc: Roland McGrath Cc: Jan Kiszka Cc: Alan Stern Cc: Ingo Molnar LKML-Reference: <20100128111401.GB13935@in.ibm.com> Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/debugreg.h | 3 +++ arch/x86/kernel/traps.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 8240f76b531e..b81002f23614 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -14,6 +14,9 @@ which debugging register was responsible for the trap. The other bits are either reserved or not of interest to us. */ +/* Define reserved bits in DR6 which are always set to 1 */ +#define DR6_RESERVED (0xFFFF0FF0) + #define DR_TRAP0 (0x1) /* db0 */ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 33399176512a..1168e4454188 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) get_debugreg(dr6, 6); + /* Filter out all the reserved bits which are preset to 1 */ + dr6 &= ~DR6_RESERVED; + /* Catch kmemcheck conditions first of all! */ if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) return; -- cgit v1.2.3-58-ga151 From e0e53db6133c32964fd17f20b17073a402f07ed3 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Thu, 28 Jan 2010 16:44:15 +0530 Subject: x86/hw-breakpoints: Optimize return code from notifier chain in hw_breakpoint_handler Processing of debug exceptions in do_debug() can stop if it originated from a hw-breakpoint exception by returning NOTIFY_STOP in most cases. But for certain cases such as: a) user-space breakpoints with pending SIGTRAP signal delivery (as in the case of ptrace induced breakpoints). b) exceptions due to other causes than breakpoints We will continue to process the exception by returning NOTIFY_DONE. 
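Condensed into a sketch, the return-code decision reads as follows; the per-cpu breakpoint walk and DR6 bookkeeping are stripped out and the helper name is illustrative:

#include <linux/notifier.h>
#include <asm/debugreg.h>

static int breakpoint_notifier_rc(unsigned long dr6, unsigned long thread_dr6)
{
	int rc = NOTIFY_STOP;	/* default: the exception was fully handled here */

	/*
	 * Fall back to NOTIFY_DONE when do_debug() still has work to do:
	 * a) a user-space breakpoint left trap bits in the thread's DR6
	 *    copy, so a SIGTRAP must still be delivered;
	 * b) the exception had causes beyond our breakpoints.
	 */
	if ((thread_dr6 & DR_TRAP_BITS) || (dr6 & ~DR_TRAP_BITS))
		rc = NOTIFY_DONE;

	return rc;
}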
Signed-off-by: K.Prasad Cc: Ingo Molnar Cc: Roland McGrath Cc: Alan Stern Cc: Jan Kiszka LKML-Reference: <20100128111415.GC13935@in.ibm.com> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/hw_breakpoint.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 05d5fec64a94..ae90b4739435 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -502,8 +502,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); bp = per_cpu(bp_per_reg[i], cpu); - if (bp) - rc = NOTIFY_DONE; /* * Reset the 'i'th TRAP bit in dr6 to denote completion of * exception handling @@ -522,7 +520,13 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) rcu_read_unlock(); } - if (dr6 & (~DR_TRAP_BITS)) + /* + * Further processing in do_debug() is needed for a) user-space + * breakpoints (to generate signals) and b) when the system has + * taken exception due to multiple causes + */ + if ((current->thread.debugreg6 & DR_TRAP_BITS) || + (dr6 & (~DR_TRAP_BITS))) rc = NOTIFY_DONE; set_debugreg(dr7, 7); -- cgit v1.2.3-58-ga151 From 1da53e023029c067ba1277a33038c65d6e4c99b3 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 18 Jan 2010 10:58:01 +0200 Subject: perf_events, x86: Improve x86 event scheduling This patch improves event scheduling by maximizing the use of PMU registers regardless of the order in which events are created in a group. The algorithm takes into account the list of counter constraints for each event. It assigns events to counters from the most constrained, i.e., works on only one counter, to the least constrained, i.e., works on any counter. Intel Fixed counter events and the BTS special event are also handled via this algorithm which is designed to be fairly generic. The patch also updates the validation of an event to use the scheduling algorithm. This will cause early failure in perf_event_open(). The 2nd version of this patch follows the model used by PPC, by running the scheduling algorithm and the actual assignment separately. Actual assignment takes place in hw_perf_enable() whereas scheduling is implemented in hw_perf_group_sched_in() and x86_pmu_enable(). Signed-off-by: Stephane Eranian [ fixup whitespace and style nits as well as adding is_x86_event() ] Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <4b5430c6.0f975e0a.1bf9.ffff85fe@mx.google.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 16 +- arch/x86/kernel/cpu/perf_event.c | 775 +++++++++++++++++++++++++++----------- 2 files changed, 574 insertions(+), 217 deletions(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 8d9f8548a870..dbc082685d52 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -26,7 +26,14 @@ /* * Includes eventsel and unit mask as well: */ -#define ARCH_PERFMON_EVENT_MASK 0xffff + + +#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL +#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL +#define INTEL_ARCH_EDGE_MASK 0x00040000ULL +#define INTEL_ARCH_INV_MASK 0x00800000ULL +#define INTEL_ARCH_CNT_MASK 0xFF000000ULL +#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK) /* * filter mask to validate fixed counter events. @@ -37,7 +44,12 @@ * The other filters are supported by fixed counters. * The any-thread option is supported starting with v3. 
*/ -#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 +#define INTEL_ARCH_FIXED_MASK \ + (INTEL_ARCH_CNT_MASK| \ + INTEL_ARCH_INV_MASK| \ + INTEL_ARCH_EDGE_MASK|\ + INTEL_ARCH_UNIT_MASK|\ + INTEL_ARCH_EVENT_MASK) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index ed1998b28a7c..995ac4ae379c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -7,6 +7,7 @@ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, + * Copyright (C) 2009 Google, Inc., Stephane Eranian * * For licencing details see kernel-base/COPYING */ @@ -68,26 +69,37 @@ struct debug_store { u64 pebs_event_reset[MAX_PEBS_EVENTS]; }; +#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) + +struct event_constraint { + u64 idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)]; + int code; + int cmask; +}; + struct cpu_hw_events { - struct perf_event *events[X86_PMC_IDX_MAX]; - unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long interrupts; int enabled; struct debug_store *ds; -}; -struct event_constraint { - unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - int code; + int n_events; + int n_added; + int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ + struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ }; -#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } -#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } +#define EVENT_CONSTRAINT(c, n, m) { \ + .code = (c), \ + .cmask = (m), \ + .idxmsk[0] = (n) } -#define for_each_event_constraint(e, c) \ - for ((e) = (c); (e)->idxmsk[0]; (e)++) +#define EVENT_CONSTRAINT_END \ + { .code = 0, .cmask = 0, .idxmsk[0] = 0 } +#define for_each_event_constraint(e, c) \ + for ((e) = (c); (e)->cmask; (e)++) /* * struct x86_pmu - generic x86 pmu @@ -114,8 +126,9 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); - int (*get_event_idx)(struct cpu_hw_events *cpuc, - struct hw_perf_event *hwc); + void (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk); + void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); + const struct event_constraint *event_constraints; }; static struct x86_pmu x86_pmu __read_mostly; @@ -124,7 +137,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -static const struct event_constraint *event_constraints; +static int x86_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx); /* * Not sure about some of these @@ -171,14 +185,14 @@ static u64 p6_pmu_raw_event(u64 hw_event) return hw_event & P6_EVNTSEL_MASK; } -static const struct event_constraint intel_p6_event_constraints[] = +static struct event_constraint intel_p6_event_constraints[] = { - EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ - EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ - EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ - EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ - EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ - EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK), /* FLOPS */ + EVENT_CONSTRAINT(0x10, 0x1, 
INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */ EVENT_CONSTRAINT_END }; @@ -196,32 +210,43 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static const struct event_constraint intel_core_event_constraints[] = -{ - EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ - EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ - EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ - EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ - EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ - EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ - EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ - EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ - EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ +static struct event_constraint intel_core_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ + EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ + EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */ + EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */ + EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */ + EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */ EVENT_CONSTRAINT_END }; -static const struct event_constraint intel_nehalem_event_constraints[] = -{ - EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ - EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ - EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ - EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ - EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ - EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ - EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ - EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ - EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ - EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ +static struct event_constraint intel_nehalem_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ + EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ + EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */ + EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */ + EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */ + EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */ + EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */ + EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */ + EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */ + EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */ + 
EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_gen_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ + EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ EVENT_CONSTRAINT_END }; @@ -527,11 +552,11 @@ static u64 intel_pmu_raw_event(u64 hw_event) #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ - (CORE_EVNTSEL_EVENT_MASK | \ - CORE_EVNTSEL_UNIT_MASK | \ - CORE_EVNTSEL_EDGE_MASK | \ - CORE_EVNTSEL_INV_MASK | \ - CORE_EVNTSEL_REG_MASK) + (INTEL_ARCH_EVTSEL_MASK | \ + INTEL_ARCH_UNIT_MASK | \ + INTEL_ARCH_EDGE_MASK | \ + INTEL_ARCH_INV_MASK | \ + INTEL_ARCH_CNT_MASK) return hw_event & CORE_EVNTSEL_MASK; } @@ -1120,9 +1145,15 @@ static void amd_pmu_disable_all(void) void hw_perf_disable(void) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + if (!x86_pmu_initialized()) return; - return x86_pmu.disable_all(); + + if (cpuc->enabled) + cpuc->n_added = 0; + + x86_pmu.disable_all(); } static void p6_pmu_enable_all(void) @@ -1189,10 +1220,237 @@ static void amd_pmu_enable_all(void) } } +static const struct pmu pmu; + +static inline int is_x86_event(struct perf_event *event) +{ + return event->pmu == &pmu; +} + +static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) +{ + int i, j , w, num; + int weight, wmax; + unsigned long *c; + u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + struct hw_perf_event *hwc; + + bitmap_zero(used_mask, X86_PMC_IDX_MAX); + + for (i = 0; i < n; i++) { + x86_pmu.get_event_constraints(cpuc, + cpuc->event_list[i], + constraints[i]); + } + + /* + * weight = number of possible counters + * + * 1 = most constrained, only works on one counter + * wmax = least constrained, works on any counter + * + * assign events to counters starting with most + * constrained events. + */ + wmax = x86_pmu.num_events; + + /* + * when fixed event counters are present, + * wmax is incremented by 1 to account + * for one more choice + */ + if (x86_pmu.num_events_fixed) + wmax++; + + num = n; + for (w = 1; num && w <= wmax; w++) { + /* for each event */ + for (i = 0; i < n; i++) { + c = (unsigned long *)constraints[i]; + hwc = &cpuc->event_list[i]->hw; + + weight = bitmap_weight(c, X86_PMC_IDX_MAX); + if (weight != w) + continue; + + /* + * try to reuse previous assignment + * + * This is possible despite the fact that + * events or events order may have changed. + * + * What matters is the level of constraints + * of an event and this is constant for now. + * + * This is possible also because we always + * scan from most to least constrained. Thus, + * if a counter can be reused, it means no, + * more constrained events, needed it. And + * next events will either compete for it + * (which cannot be solved anyway) or they + * have fewer constraints, and they can use + * another counter. + */ + j = hwc->idx; + if (j != -1 && !test_bit(j, used_mask)) + goto skip; + + for_each_bit(j, c, X86_PMC_IDX_MAX) { + if (!test_bit(j, used_mask)) + break; + } + + if (j == X86_PMC_IDX_MAX) + break; +skip: + set_bit(j, used_mask); + +#if 0 + pr_debug("CPU%d config=0x%llx idx=%d assign=%c\n", + smp_processor_id(), + hwc->config, + j, + assign ? 
'y' : 'n'); +#endif + + if (assign) + assign[i] = j; + num--; + } + } + /* + * scheduling failed or is just a simulation, + * free resources if necessary + */ + if (!assign || num) { + for (i = 0; i < n; i++) { + if (x86_pmu.put_event_constraints) + x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); + } + } + return num ? -ENOSPC : 0; +} + +/* + * dogrp: true if must collect siblings events (group) + * returns total number of events and error code + */ +static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = x86_pmu.num_events + x86_pmu.num_events_fixed; + + /* current number of events already accepted */ + n = cpuc->n_events; + + if (is_x86_event(leader)) { + if (n >= max_count) + return -ENOSPC; + cpuc->event_list[n] = leader; + n++; + } + if (!dogrp) + return n; + + list_for_each_entry(event, &leader->sibling_list, group_entry) { + if (!is_x86_event(event) || + event->state == PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -ENOSPC; + + cpuc->event_list[n] = event; + n++; + } + return n; +} + + +static inline void x86_assign_hw_event(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + hwc->idx = idx; + + if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { + hwc->config_base = 0; + hwc->event_base = 0; + } else if (hwc->idx >= X86_PMC_IDX_FIXED) { + hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; + /* + * We set it so that event_base + idx in wrmsr/rdmsr maps to + * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: + */ + hwc->event_base = + MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; + } else { + hwc->config_base = x86_pmu.eventsel; + hwc->event_base = x86_pmu.perfctr; + } +} + void hw_perf_enable(void) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct perf_event *event; + struct hw_perf_event *hwc; + int i; + if (!x86_pmu_initialized()) return; + if (cpuc->n_added) { + /* + * apply assignment obtained either from + * hw_perf_group_sched_in() or x86_pmu_enable() + * + * step1: save events moving to new counters + * step2: reprogram moved events into new counters + */ + for (i = 0; i < cpuc->n_events; i++) { + + event = cpuc->event_list[i]; + hwc = &event->hw; + + if (hwc->idx == -1 || hwc->idx == cpuc->assign[i]) + continue; + + x86_pmu.disable(hwc, hwc->idx); + + clear_bit(hwc->idx, cpuc->active_mask); + barrier(); + cpuc->events[hwc->idx] = NULL; + + x86_perf_event_update(event, hwc, hwc->idx); + + hwc->idx = -1; + } + + for (i = 0; i < cpuc->n_events; i++) { + + event = cpuc->event_list[i]; + hwc = &event->hw; + + if (hwc->idx == -1) { + x86_assign_hw_event(event, hwc, cpuc->assign[i]); + x86_perf_event_set_period(event, hwc, hwc->idx); + } + /* + * need to mark as active because x86_pmu_disable() + * clear active_mask and eventsp[] yet it preserves + * idx + */ + set_bit(hwc->idx, cpuc->active_mask); + cpuc->events[hwc->idx] = event; + + x86_pmu.enable(hwc, hwc->idx); + perf_event_update_userpage(event); + } + cpuc->n_added = 0; + perf_events_lapic_init(); + } x86_pmu.enable_all(); } @@ -1391,148 +1649,43 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) x86_pmu_enable_event(hwc, idx); } -static int fixed_mode_idx(struct hw_perf_event *hwc) -{ - unsigned int hw_event; - - hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; - - if (unlikely((hw_event == - x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && - (hwc->sample_period == 1))) - return X86_PMC_IDX_FIXED_BTS; - - if (!x86_pmu.num_events_fixed) - 
return -1; - - /* - * fixed counters do not take all possible filters - */ - if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) - return -1; - - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) - return X86_PMC_IDX_FIXED_INSTRUCTIONS; - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) - return X86_PMC_IDX_FIXED_CPU_CYCLES; - if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) - return X86_PMC_IDX_FIXED_BUS_CYCLES; - - return -1; -} - -/* - * generic counter allocator: get next free counter - */ -static int -gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) -{ - int idx; - - idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); - return idx == x86_pmu.num_events ? -1 : idx; -} - /* - * intel-specific counter allocator: check event constraints - */ -static int -intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) -{ - const struct event_constraint *event_constraint; - int i, code; - - if (!event_constraints) - goto skip; - - code = hwc->config & CORE_EVNTSEL_EVENT_MASK; - - for_each_event_constraint(event_constraint, event_constraints) { - if (code == event_constraint->code) { - for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { - if (!test_and_set_bit(i, cpuc->used_mask)) - return i; - } - return -1; - } - } -skip: - return gen_get_event_idx(cpuc, hwc); -} - -static int -x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) -{ - int idx; - - idx = fixed_mode_idx(hwc); - if (idx == X86_PMC_IDX_FIXED_BTS) { - /* BTS is already occupied. */ - if (test_and_set_bit(idx, cpuc->used_mask)) - return -EAGAIN; - - hwc->config_base = 0; - hwc->event_base = 0; - hwc->idx = idx; - } else if (idx >= 0) { - /* - * Try to get the fixed event, if that is already taken - * then try to get a generic event: - */ - if (test_and_set_bit(idx, cpuc->used_mask)) - goto try_generic; - - hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - /* - * We set it so that event_base + idx in wrmsr/rdmsr maps to - * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: - */ - hwc->event_base = - MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; - hwc->idx = idx; - } else { - idx = hwc->idx; - /* Try to get the previous generic event again */ - if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { -try_generic: - idx = x86_pmu.get_event_idx(cpuc, hwc); - if (idx == -1) - return -EAGAIN; - - set_bit(idx, cpuc->used_mask); - hwc->idx = idx; - } - hwc->config_base = x86_pmu.eventsel; - hwc->event_base = x86_pmu.perfctr; - } - - return idx; -} - -/* - * Find a PMC slot for the freshly enabled / scheduled in event: + * activate a single event + * + * The event is added to the group of enabled events + * but only if it can be scehduled with existing events. + * + * Called with PMU disabled. 
If successful and return value 1, + * then guaranteed to call perf_enable() and hw_perf_enable() */ static int x86_pmu_enable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; + struct hw_perf_event *hwc; + int assign[X86_PMC_IDX_MAX]; + int n, n0, ret; - idx = x86_schedule_event(cpuc, hwc); - if (idx < 0) - return idx; + hwc = &event->hw; - perf_events_lapic_init(); + n0 = cpuc->n_events; + n = collect_events(cpuc, event, false); + if (n < 0) + return n; - x86_pmu.disable(hwc, idx); - - cpuc->events[idx] = event; - set_bit(idx, cpuc->active_mask); + ret = x86_schedule_events(cpuc, n, assign); + if (ret) + return ret; + /* + * copy new assignment, now we know it is possible + * will be used by hw_perf_enable() + */ + memcpy(cpuc->assign, assign, n*sizeof(int)); - x86_perf_event_set_period(event, hwc, idx); - x86_pmu.enable(hwc, idx); + cpuc->n_events = n; + cpuc->n_added = n - n0; - perf_event_update_userpage(event); + if (hwc->idx != -1) + x86_perf_event_set_period(event, hwc, hwc->idx); return 0; } @@ -1576,7 +1729,7 @@ void perf_event_print_debug(void) pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); } - pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); + pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); for (idx = 0; idx < x86_pmu.num_events; idx++) { rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); @@ -1664,7 +1817,7 @@ static void x86_pmu_disable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int i, idx = hwc->idx; /* * Must be done before we disable, otherwise the nmi handler @@ -1690,8 +1843,19 @@ static void x86_pmu_disable(struct perf_event *event) intel_pmu_drain_bts_buffer(cpuc); cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + for (i = 0; i < cpuc->n_events; i++) { + if (event == cpuc->event_list[i]) { + + if (x86_pmu.put_event_constraints) + x86_pmu.put_event_constraints(cpuc, event); + + while (++i < cpuc->n_events) + cpuc->event_list[i-1] = cpuc->event_list[i]; + + --cpuc->n_events; + } + } perf_event_update_userpage(event); } @@ -1962,6 +2126,176 @@ perf_event_nmi_handler(struct notifier_block *self, return NOTIFY_STOP; } +static struct event_constraint bts_constraint = { + .code = 0, + .cmask = 0, + .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS +}; + +static int intel_special_constraints(struct perf_event *event, + u64 *idxmsk) +{ + unsigned int hw_event; + + hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK; + + if (unlikely((hw_event == + x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && + (event->hw.sample_period == 1))) { + + bitmap_copy((unsigned long *)idxmsk, + (unsigned long *)bts_constraint.idxmsk, + X86_PMC_IDX_MAX); + return 1; + } + return 0; +} + +static void intel_get_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event, + u64 *idxmsk) +{ + const struct event_constraint *c; + + /* + * cleanup bitmask + */ + bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX); + + if (intel_special_constraints(event, idxmsk)) + return; + + if (x86_pmu.event_constraints) { + for_each_event_constraint(c, x86_pmu.event_constraints) { + if ((event->hw.config & c->cmask) == c->code) { + + bitmap_copy((unsigned long *)idxmsk, + (unsigned long *)c->idxmsk, + X86_PMC_IDX_MAX); + return; + } + } + } + /* no constraints, means supports all generic counters */ + 
bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); +} + +static void amd_get_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event, + u64 *idxmsk) +{ +} + +static int x86_event_sched_in(struct perf_event *event, + struct perf_cpu_context *cpuctx, int cpu) +{ + int ret = 0; + + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = cpu; + event->tstamp_running += event->ctx->time - event->tstamp_stopped; + + if (!is_x86_event(event)) + ret = event->pmu->enable(event); + + if (!ret && !is_software_event(event)) + cpuctx->active_oncpu++; + + if (!ret && event->attr.exclusive) + cpuctx->exclusive = 1; + + return ret; +} + +static void x86_event_sched_out(struct perf_event *event, + struct perf_cpu_context *cpuctx, int cpu) +{ + event->state = PERF_EVENT_STATE_INACTIVE; + event->oncpu = -1; + + if (!is_x86_event(event)) + event->pmu->disable(event); + + event->tstamp_running -= event->ctx->time - event->tstamp_stopped; + + if (!is_software_event(event)) + cpuctx->active_oncpu--; + + if (event->attr.exclusive || !cpuctx->active_oncpu) + cpuctx->exclusive = 0; +} + +/* + * Called to enable a whole group of events. + * Returns 1 if the group was enabled, or -EAGAIN if it could not be. + * Assumes the caller has disabled interrupts and has + * frozen the PMU with hw_perf_save_disable. + * + * called with PMU disabled. If successful and return value 1, + * then guaranteed to call perf_enable() and hw_perf_enable() + */ +int hw_perf_group_sched_in(struct perf_event *leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct perf_event *sub; + int assign[X86_PMC_IDX_MAX]; + int n0, n1, ret; + + /* n0 = total number of events */ + n0 = collect_events(cpuc, leader, true); + if (n0 < 0) + return n0; + + ret = x86_schedule_events(cpuc, n0, assign); + if (ret) + return ret; + + ret = x86_event_sched_in(leader, cpuctx, cpu); + if (ret) + return ret; + + n1 = 1; + list_for_each_entry(sub, &leader->sibling_list, group_entry) { + if (sub->state != PERF_EVENT_STATE_OFF) { + ret = x86_event_sched_in(sub, cpuctx, cpu); + if (ret) + goto undo; + ++n1; + } + } + /* + * copy new assignment, now we know it is possible + * will be used by hw_perf_enable() + */ + memcpy(cpuc->assign, assign, n0*sizeof(int)); + + cpuc->n_events = n0; + cpuc->n_added = n1; + ctx->nr_active += n1; + + /* + * 1 means successful and events are active + * This is not quite true because we defer + * actual activation until hw_perf_enable() but + * this way we* ensure caller won't try to enable + * individual events + */ + return 1; +undo: + x86_event_sched_out(leader, cpuctx, cpu); + n0 = 1; + list_for_each_entry(sub, &leader->sibling_list, group_entry) { + if (sub->state == PERF_EVENT_STATE_ACTIVE) { + x86_event_sched_out(sub, cpuctx, cpu); + if (++n0 == n1) + break; + } + } + return ret; +} + static __read_mostly struct notifier_block perf_event_nmi_notifier = { .notifier_call = perf_event_nmi_handler, .next = NULL, @@ -1993,7 +2327,8 @@ static __initconst struct x86_pmu p6_pmu = { */ .event_bits = 32, .event_mask = (1ULL << 32) - 1, - .get_event_idx = intel_get_event_idx, + .get_event_constraints = intel_get_event_constraints, + .event_constraints = intel_p6_event_constraints }; static __initconst struct x86_pmu intel_pmu = { @@ -2017,7 +2352,7 @@ static __initconst struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, - 
.get_event_idx = intel_get_event_idx, + .get_event_constraints = intel_get_event_constraints }; static __initconst struct x86_pmu amd_pmu = { @@ -2038,7 +2373,7 @@ static __initconst struct x86_pmu amd_pmu = { .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, - .get_event_idx = gen_get_event_idx, + .get_event_constraints = amd_get_event_constraints }; static __init int p6_pmu_init(void) @@ -2051,12 +2386,9 @@ static __init int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ - event_constraints = intel_p6_event_constraints; - break; case 9: case 13: /* Pentium M */ - event_constraints = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2121,23 +2453,29 @@ static __init int intel_pmu_init(void) memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + x86_pmu.event_constraints = intel_core_event_constraints; pr_cont("Core2 events, "); - event_constraints = intel_core_event_constraints; break; - default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - event_constraints = intel_nehalem_event_constraints; + x86_pmu.event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + x86_pmu.event_constraints = intel_gen_event_constraints; pr_cont("Atom events, "); break; + default: + /* + * default constraints for v2 and up + */ + x86_pmu.event_constraints = intel_gen_event_constraints; + pr_cont("generic architected perfmon, "); } return 0; } @@ -2234,36 +2572,43 @@ static const struct pmu pmu = { .unthrottle = x86_pmu_unthrottle, }; -static int -validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) -{ - struct hw_perf_event fake_event = event->hw; - - if (event->pmu && event->pmu != &pmu) - return 0; - - return x86_schedule_event(cpuc, &fake_event) >= 0; -} - +/* + * validate a single event group + * + * validation include: + * - check events are compatible which each other + * - events do not compete for the same counter + * - number of events <= number of counters + * + * validation ensures the group can be loaded onto the + * PMU if it was the only group available. 
+ */ static int validate_group(struct perf_event *event) { - struct perf_event *sibling, *leader = event->group_leader; - struct cpu_hw_events fake_pmu; + struct perf_event *leader = event->group_leader; + struct cpu_hw_events fake_cpuc; + int n; - memset(&fake_pmu, 0, sizeof(fake_pmu)); + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); - if (!validate_event(&fake_pmu, leader)) + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = collect_events(&fake_cpuc, leader, true); + if (n < 0) return -ENOSPC; - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(&fake_pmu, sibling)) - return -ENOSPC; - } - - if (!validate_event(&fake_pmu, event)) + fake_cpuc.n_events = n; + n = collect_events(&fake_cpuc, event, false); + if (n < 0) return -ENOSPC; - return 0; + fake_cpuc.n_events = n; + + return x86_schedule_events(&fake_cpuc, n, NULL); } const struct pmu *hw_perf_event_init(struct perf_event *event) -- cgit v1.2.3-58-ga151 From 8113070d6639d2245c6c79afb8df42cedab30540 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 21 Jan 2010 17:39:01 +0200 Subject: perf_events: Add fast-path to the rescheduling code Implement correct fastpath scheduling, i.e., reuse previous assignment. Signed-off-by: Stephane Eranian [ split from larger patch] Signed-off-by: Peter Zijlstra LKML-Reference: <4b588464.1818d00a.4456.383b@mx.google.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 91 +++++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 995ac4ae379c..0bd23d01af34 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1244,6 +1244,46 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) constraints[i]); } + /* + * fastpath, try to reuse previous register + */ + for (i = 0, num = n; i < n; i++, num--) { + hwc = &cpuc->event_list[i]->hw; + c = (unsigned long *)constraints[i]; + + /* never assigned */ + if (hwc->idx == -1) + break; + + /* constraint still honored */ + if (!test_bit(hwc->idx, c)) + break; + + /* not already used */ + if (test_bit(hwc->idx, used_mask)) + break; + +#if 0 + pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n", + smp_processor_id(), + hwc->config, + hwc->idx, + assign ? 'y' : 'n'); +#endif + + set_bit(hwc->idx, used_mask); + if (assign) + assign[i] = hwc->idx; + } + if (!num) + goto done; + + /* + * begin slow path + */ + + bitmap_zero(used_mask, X86_PMC_IDX_MAX); + /* * weight = number of possible counters * @@ -1263,10 +1303,9 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (x86_pmu.num_events_fixed) wmax++; - num = n; - for (w = 1; num && w <= wmax; w++) { + for (w = 1, num = n; num && w <= wmax; w++) { /* for each event */ - for (i = 0; i < n; i++) { + for (i = 0; num && i < n; i++) { c = (unsigned long *)constraints[i]; hwc = &cpuc->event_list[i]->hw; @@ -1274,28 +1313,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (weight != w) continue; - /* - * try to reuse previous assignment - * - * This is possible despite the fact that - * events or events order may have changed. - * - * What matters is the level of constraints - * of an event and this is constant for now. 
- * - * This is possible also because we always - * scan from most to least constrained. Thus, - * if a counter can be reused, it means no, - * more constrained events, needed it. And - * next events will either compete for it - * (which cannot be solved anyway) or they - * have fewer constraints, and they can use - * another counter. - */ - j = hwc->idx; - if (j != -1 && !test_bit(j, used_mask)) - goto skip; - for_each_bit(j, c, X86_PMC_IDX_MAX) { if (!test_bit(j, used_mask)) break; @@ -1303,22 +1320,23 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (j == X86_PMC_IDX_MAX) break; -skip: - set_bit(j, used_mask); #if 0 - pr_debug("CPU%d config=0x%llx idx=%d assign=%c\n", + pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n", smp_processor_id(), hwc->config, j, assign ? 'y' : 'n'); #endif + set_bit(j, used_mask); + if (assign) assign[i] = j; num--; } } +done: /* * scheduling failed or is just a simulation, * free resources if necessary @@ -1357,7 +1375,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, list_for_each_entry(event, &leader->sibling_list, group_entry) { if (!is_x86_event(event) || - event->state == PERF_EVENT_STATE_OFF) + event->state <= PERF_EVENT_STATE_OFF) continue; if (n >= max_count) @@ -2184,6 +2202,8 @@ static void amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk) { + /* no constraints, means supports all generic counters */ + bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); } static int x86_event_sched_in(struct perf_event *event, @@ -2258,7 +2278,7 @@ int hw_perf_group_sched_in(struct perf_event *leader, n1 = 1; list_for_each_entry(sub, &leader->sibling_list, group_entry) { - if (sub->state != PERF_EVENT_STATE_OFF) { + if (sub->state > PERF_EVENT_STATE_OFF) { ret = x86_event_sched_in(sub, cpuctx, cpu); if (ret) goto undo; @@ -2613,12 +2633,23 @@ static int validate_group(struct perf_event *event) const struct pmu *hw_perf_event_init(struct perf_event *event) { + const struct pmu *tmp; int err; err = __hw_perf_event_init(event); if (!err) { + /* + * we temporarily connect event to its pmu + * such that validate_group() can classify + * it as an x86 event using is_x86_event() + */ + tmp = event->pmu; + event->pmu = &pmu; + if (event->group_leader != event) err = validate_group(event); + + event->pmu = tmp; } if (err) { if (event->destroy) -- cgit v1.2.3-58-ga151 From 502568d563bcc37ac505a83341c0c95b88c015a8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 14:35:46 +0100 Subject: perf_event: x86: Allocate the fake_cpuc GCC was complaining the stack usage was too large, so allocate the structure. 
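The fix below swaps the on-stack cpu_hw_events scratch structure used by validate_group() for a heap allocation. The same move in plain userspace C looks roughly like this (a hedged sketch: the structure layout and the size check are invented for illustration, and calloc() stands in for kmalloc() with __GFP_ZERO):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for a scratch structure too large to keep on the stack */
struct big_scratch {
	int	n_events;
	char	payload[16 * 1024];
};

static int validate_with_scratch(int n)
{
	struct big_scratch *scratch;
	int ret = -1;				/* -ENOMEM analogue */

	/* zeroed heap allocation instead of a large local variable */
	scratch = calloc(1, sizeof(*scratch));
	if (!scratch)
		return ret;

	scratch->n_events = n;
	ret = (scratch->n_events <= 4) ? 0 : -2;	/* pretend scheduling check */

	free(scratch);
	return ret;
}

int main(void)
{
	printf("validate(3) = %d\n", validate_with_scratch(3));
	printf("validate(9) = %d\n", validate_with_scratch(9));
	return 0;
}

The shape matters more than the numbers: the temporary lives exactly as long as the validation call and never appears in the caller's stack frame.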
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.411197266@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 0bd23d01af34..7bd359a57839 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2606,10 +2606,13 @@ static const struct pmu pmu = { static int validate_group(struct perf_event *event) { struct perf_event *leader = event->group_leader; - struct cpu_hw_events fake_cpuc; - int n; + struct cpu_hw_events *fake_cpuc; + int ret, n; - memset(&fake_cpuc, 0, sizeof(fake_cpuc)); + ret = -ENOMEM; + fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); + if (!fake_cpuc) + goto out; /* * the event is not yet connected with its @@ -2617,18 +2620,24 @@ static int validate_group(struct perf_event *event) * existing siblings, then add the new event * before we can simulate the scheduling */ - n = collect_events(&fake_cpuc, leader, true); + ret = -ENOSPC; + n = collect_events(fake_cpuc, leader, true); if (n < 0) - return -ENOSPC; + goto out_free; - fake_cpuc.n_events = n; - n = collect_events(&fake_cpuc, event, false); + fake_cpuc->n_events = n; + n = collect_events(fake_cpuc, event, false); if (n < 0) - return -ENOSPC; + goto out_free; - fake_cpuc.n_events = n; + fake_cpuc->n_events = n; - return x86_schedule_events(&fake_cpuc, n, NULL); + ret = x86_schedule_events(fake_cpuc, n, NULL); + +out_free: + kfree(fake_cpuc); +out: + return ret; } const struct pmu *hw_perf_event_init(struct perf_event *event) -- cgit v1.2.3-58-ga151 From 81269a085669b5130058a0275aa7ba9f94abd1fa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 14:55:22 +0100 Subject: perf_event: x86: Fixup constraints typing issue Constraints gets defined an u64 but in long quantities and then cast to long. 
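The typing fix below boils down to declaring bitmap storage with the same element type the bitmap helpers operate on. A standalone sketch of the idea (BITS_TO_LONGS and the bit helpers are re-created here for userspace; they are not the kernel headers):

#include <stdio.h>
#include <limits.h>

#define MAX_COUNTERS	64
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* the helpers work on unsigned long words, like the kernel bitmap API */
static void set_bit_ul(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit_ul(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	/*
	 * unsigned long storage matches the helpers' word size; the old
	 * u64-array-plus-cast approach only happened to work where
	 * sizeof(long) == sizeof(u64).
	 */
	unsigned long constraint[BITS_TO_LONGS(MAX_COUNTERS)] = { 0 };

	set_bit_ul(constraint, 2);
	set_bit_ul(constraint, 33);
	printf("bit 2=%d bit 3=%d bit 33=%d\n",
	       test_bit_ul(constraint, 2),
	       test_bit_ul(constraint, 3),
	       test_bit_ul(constraint, 33));
	return 0;
}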
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.504916780@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 7bd359a57839..7e181a5097ea 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1232,7 +1232,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) int i, j , w, num; int weight, wmax; unsigned long *c; - u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct hw_perf_event *hwc; @@ -1249,7 +1249,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) */ for (i = 0, num = n; i < n; i++, num--) { hwc = &cpuc->event_list[i]->hw; - c = (unsigned long *)constraints[i]; + c = constraints[i]; /* never assigned */ if (hwc->idx == -1) @@ -1306,7 +1306,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) for (w = 1, num = n; num && w <= wmax; w++) { /* for each event */ for (i = 0; num && i < n; i++) { - c = (unsigned long *)constraints[i]; + c = constraints[i]; hwc = &cpuc->event_list[i]->hw; weight = bitmap_weight(c, X86_PMC_IDX_MAX); -- cgit v1.2.3-58-ga151 From c91e0f5da81c6f3a611a1bd6d0cca6717c90fdab Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 15:25:59 +0100 Subject: perf_event: x86: Clean up some of the u64/long bitmask casting We need this to be u64 for direct assigment, but the bitmask functions all work on unsigned long, leading to cast heaven, solve this by using a union. 
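The union described above lets a single u64 literal initialize the mask while the bitmap side is still addressed as unsigned long words. A self-contained sketch of that layout (assumes a little-endian target such as x86, the only case where the two views line up; IDX_MAX and the sample constraint are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define IDX_MAX		64
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(IDX_MAX)];
		uint64_t	idxmsk64[1];
	};
	int	code;
	int	cmask;
};

/* initialize through the u64 view, the way EVENT_CONSTRAINT() does */
#define EVENT_CONSTRAINT(c, n, m) \
	{ .idxmsk64 = { (n) }, .code = (c), .cmask = (m) }

int main(void)
{
	/* event 0x10 may only run on counter 0 (bit 0 of the mask) */
	struct event_constraint fp_ops = EVENT_CONSTRAINT(0x10, 0x1, 0xff);
	unsigned int bit;

	/* read the same storage back through the unsigned long view */
	for (bit = 0; bit < IDX_MAX; bit++)
		if (fp_ops.idxmsk[bit / BITS_PER_LONG] &
		    (1UL << (bit % BITS_PER_LONG)))
			printf("event 0x%x allowed on counter %u\n",
			       (unsigned)fp_ops.code, bit);
	return 0;
}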
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.595961269@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 47 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 7e181a5097ea..921bbf732e77 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -69,10 +69,11 @@ struct debug_store { u64 pebs_event_reset[MAX_PEBS_EVENTS]; }; -#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) - struct event_constraint { - u64 idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)]; + union { + unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + u64 idxmsk64[1]; + }; int code; int cmask; }; @@ -90,13 +91,14 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ }; -#define EVENT_CONSTRAINT(c, n, m) { \ - .code = (c), \ - .cmask = (m), \ - .idxmsk[0] = (n) } +#define EVENT_CONSTRAINT(c, n, m) { \ + { .idxmsk64[0] = (n) }, \ + .code = (c), \ + .cmask = (m), \ +} #define EVENT_CONSTRAINT_END \ - { .code = 0, .cmask = 0, .idxmsk[0] = 0 } + EVENT_CONSTRAINT(0, 0, 0) #define for_each_event_constraint(e, c) \ for ((e) = (c); (e)->cmask; (e)++) @@ -126,8 +128,11 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); - void (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk); - void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); + void (*get_event_constraints)(struct cpu_hw_events *cpuc, + struct perf_event *event, + unsigned long *idxmsk); + void (*put_event_constraints)(struct cpu_hw_events *cpuc, + struct perf_event *event); const struct event_constraint *event_constraints; }; @@ -2144,14 +2149,11 @@ perf_event_nmi_handler(struct notifier_block *self, return NOTIFY_STOP; } -static struct event_constraint bts_constraint = { - .code = 0, - .cmask = 0, - .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS -}; +static struct event_constraint bts_constraint = + EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); static int intel_special_constraints(struct perf_event *event, - u64 *idxmsk) + unsigned long *idxmsk) { unsigned int hw_event; @@ -2171,14 +2173,14 @@ static int intel_special_constraints(struct perf_event *event, static void intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, - u64 *idxmsk) + unsigned long *idxmsk) { const struct event_constraint *c; /* * cleanup bitmask */ - bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX); + bitmap_zero(idxmsk, X86_PMC_IDX_MAX); if (intel_special_constraints(event, idxmsk)) return; @@ -2186,10 +2188,7 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc, if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { if ((event->hw.config & c->cmask) == c->code) { - - bitmap_copy((unsigned long *)idxmsk, - (unsigned long *)c->idxmsk, - X86_PMC_IDX_MAX); + bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX); return; } } @@ -2200,10 +2199,10 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc, static void amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, - u64 *idxmsk) + unsigned long *idxmsk) { /* no constraints, means supports all generic counters */ - bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); + bitmap_fill(idxmsk, x86_pmu.num_events); } static int 
x86_event_sched_in(struct perf_event *event, -- cgit v1.2.3-58-ga151 From 8433be1184e4f22c37d4b8ed36cde529a47882f4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 15:38:26 +0100 Subject: perf_event: x86: Reduce some overly long lines with some MACROs Introduce INTEL_EVENT_CONSTRAINT and FIXED_EVENT_CONSTRAINT to reduce some line length and typing work. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.688730371@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 68 ++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 921bbf732e77..4d1ed101c10d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -97,6 +97,12 @@ struct cpu_hw_events { .cmask = (m), \ } +#define INTEL_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) + +#define FIXED_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) + #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) @@ -192,12 +198,12 @@ static u64 p6_pmu_raw_event(u64 hw_event) static struct event_constraint intel_p6_event_constraints[] = { - EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK), /* FLOPS */ - EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */ - EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */ - EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */ - EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */ - EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */ + INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ + INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ + INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ EVENT_CONSTRAINT_END }; @@ -217,41 +223,41 @@ static const u64 intel_perfmon_event_map[] = static struct event_constraint intel_core_event_constraints[] = { - EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ - EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ - EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */ - EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */ - EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */ - EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */ - EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */ - EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */ - EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */ - EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */ - EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */ + FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ + FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ + INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ + INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ + 
INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ + INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_nehalem_event_constraints[] = { - EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ - EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ - EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */ - EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */ - EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */ - EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */ - EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */ - EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */ - EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */ - EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ - EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */ - EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */ + FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ + FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ + INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ + INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ + INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ + INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ + INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ + INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ + INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ + INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ + INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_gen_event_constraints[] = { - EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */ - EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ + FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ EVENT_CONSTRAINT_END }; -- cgit v1.2.3-58-ga151 From 9f41699ed067fa695faff8e2e9981b2550abec62 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 15:59:29 +0100 Subject: bitops: Provide compile time HWEIGHT{8,16,32,64} Provide compile time versions of hweight. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian Cc: Linus Torvalds Cc: Andrew Morton Cc: Thomas Gleixner LKML-Reference: <20100122155535.797688466@chello.nl> [ Remove some whitespace damage while we are at it ] Signed-off-by: Ingo Molnar --- include/linux/bitops.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index c05a29cb9bb2..ba0fd1eb4af7 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -25,7 +25,7 @@ static __inline__ int get_bitmask_order(unsigned int count) { int order; - + order = fls(count); return order; /* We could be slightly more clever with -1 here... 
*/ } @@ -33,7 +33,7 @@ static __inline__ int get_bitmask_order(unsigned int count) static __inline__ int get_count_order(unsigned int count) { int order; - + order = fls(count) - 1; if (count & (count - 1)) order++; @@ -45,6 +45,20 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } +#define HWEIGHT8(w) \ + ( (!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))) ) + +#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8(w >> 8)) +#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16(w >> 16)) +#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32(w >> 32)) + /** * rol32 - rotate a 32-bit value left * @word: value to rotate -- cgit v1.2.3-58-ga151 From 63b146490befc027a7e0923e333269e68b20d380 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 16:32:17 +0100 Subject: perf_event: x86: Optimize the constraint searching bits Instead of copying bitmasks around, pass pointers to the constraint structure. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.887853503@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 75 ++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 41 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4d1ed101c10d..092ad566734c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -134,12 +134,14 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); - void (*get_event_constraints)(struct cpu_hw_events *cpuc, - struct perf_event *event, - unsigned long *idxmsk); + + struct event_constraint * + (*get_event_constraints)(struct cpu_hw_events *cpuc, + struct perf_event *event); + void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); - const struct event_constraint *event_constraints; + struct event_constraint *event_constraints; }; static struct x86_pmu x86_pmu __read_mostly; @@ -1242,17 +1244,15 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { int i, j , w, num; int weight, wmax; - unsigned long *c; - unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); for (i = 0; i < n; i++) { - x86_pmu.get_event_constraints(cpuc, - cpuc->event_list[i], - constraints[i]); + constraints[i] = + x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); } /* @@ -1267,7 +1267,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) break; /* constraint still honored */ - if (!test_bit(hwc->idx, c)) + if (!test_bit(hwc->idx, c->idxmsk)) break; /* not already used */ @@ -1320,11 +1320,11 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) c = constraints[i]; hwc = &cpuc->event_list[i]->hw; - weight = bitmap_weight(c, X86_PMC_IDX_MAX); + weight = bitmap_weight(c->idxmsk, X86_PMC_IDX_MAX); if (weight != w) continue; - for_each_bit(j, c, X86_PMC_IDX_MAX) { + for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { if (!test_bit(j, used_mask)) break; } @@ -2155,11 +2155,13 @@ perf_event_nmi_handler(struct notifier_block *self, return NOTIFY_STOP; } +static 
struct event_constraint unconstrained; + static struct event_constraint bts_constraint = EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); -static int intel_special_constraints(struct perf_event *event, - unsigned long *idxmsk) +static struct event_constraint * +intel_special_constraints(struct perf_event *event) { unsigned int hw_event; @@ -2169,46 +2171,34 @@ static int intel_special_constraints(struct perf_event *event, x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && (event->hw.sample_period == 1))) { - bitmap_copy((unsigned long *)idxmsk, - (unsigned long *)bts_constraint.idxmsk, - X86_PMC_IDX_MAX); - return 1; + return &bts_constraint; } - return 0; + return NULL; } -static void intel_get_event_constraints(struct cpu_hw_events *cpuc, - struct perf_event *event, - unsigned long *idxmsk) +static struct event_constraint * +intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - const struct event_constraint *c; + struct event_constraint *c; - /* - * cleanup bitmask - */ - bitmap_zero(idxmsk, X86_PMC_IDX_MAX); - - if (intel_special_constraints(event, idxmsk)) - return; + c = intel_special_constraints(event); + if (c) + return c; if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { - if ((event->hw.config & c->cmask) == c->code) { - bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX); - return; - } + if ((event->hw.config & c->cmask) == c->code) + return c; } } - /* no constraints, means supports all generic counters */ - bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); + + return &unconstrained; } -static void amd_get_event_constraints(struct cpu_hw_events *cpuc, - struct perf_event *event, - unsigned long *idxmsk) +static struct event_constraint * +amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - /* no constraints, means supports all generic counters */ - bitmap_fill(idxmsk, x86_pmu.num_events); + return &unconstrained; } static int x86_event_sched_in(struct perf_event *event, @@ -2576,6 +2566,9 @@ void __init init_hw_perf_events(void) perf_events_lapic_init(); register_die_notifier(&perf_event_nmi_notifier); + unconstrained = (struct event_constraint) + EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0); + pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.event_bits); pr_info("... generic registers: %d\n", x86_pmu.num_events); -- cgit v1.2.3-58-ga151 From 272d30be622c9c6cbd514b1211ff359292001baa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 16:32:17 +0100 Subject: perf_event: x86: Optimize constraint weight computation Add a weight member to the constraint structure and avoid recomputing the weight at runtime. 
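The weight patch leans on the compile-time HWEIGHT64() helper added earlier in this series: because the counter mask in each constraint is a constant, its population count can be baked into the static tables instead of being recomputed with bitmap_weight() on every scheduling pass. A small sketch (the HWEIGHT macros follow the bitops patch above; the table entries are illustrative):

#include <stdio.h>
#include <stdint.h>

/* compile-time population count, as added to include/linux/bitops.h */
#define HWEIGHT8(w)						\
	((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) +	\
	 (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) +	\
	 (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) +	\
	 (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))
#define HWEIGHT16(w)	(HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w)	(HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w)	(HWEIGHT32(w) + HWEIGHT32((w) >> 32))

struct constraint {
	uint64_t	idxmsk;
	int		code;
	int		weight;	/* number of counters the event may use */
};

/* weight is a constant expression, so it can sit in a static initializer */
#define CONSTRAINT(c, n) \
	{ .idxmsk = (n), .code = (c), .weight = HWEIGHT64((uint64_t)(n)) }

static const struct constraint constraints[] = {
	CONSTRAINT(0x12, 0x2),	/* e.g. MUL: counter 1 only, weight 1  */
	CONSTRAINT(0x51, 0x3),	/* e.g. L1D: counters 0-1, weight 2    */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(constraints) / sizeof(constraints[0]); i++)
		printf("event 0x%x: mask 0x%llx, weight %d\n",
		       (unsigned)constraints[i].code,
		       (unsigned long long)constraints[i].idxmsk,
		       constraints[i].weight);
	return 0;
}

The scheduler can then compare c->weight against the current pass width directly, which is exactly what the diff below does.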
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155535.963944926@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 092ad566734c..2c22ce4fa784 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -76,6 +77,7 @@ struct event_constraint { }; int code; int cmask; + int weight; }; struct cpu_hw_events { @@ -95,6 +97,7 @@ struct cpu_hw_events { { .idxmsk64[0] = (n) }, \ .code = (c), \ .cmask = (m), \ + .weight = HWEIGHT64((u64)(n)), \ } #define INTEL_EVENT_CONSTRAINT(c, n) \ @@ -1242,8 +1245,7 @@ static inline int is_x86_event(struct perf_event *event) static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { - int i, j , w, num; - int weight, wmax; + int i, j, w, num, wmax; struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct hw_perf_event *hwc; @@ -1320,8 +1322,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) c = constraints[i]; hwc = &cpuc->event_list[i]->hw; - weight = bitmap_weight(c->idxmsk, X86_PMC_IDX_MAX); - if (weight != w) + if (c->weight != w) continue; for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { -- cgit v1.2.3-58-ga151 From c933c1a603d5bf700ddce79216c1be0ec3bc0e6c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Jan 2010 16:40:12 +0100 Subject: perf_event: x86: Optimize the fast path a little more Remove num from the fast path and save a few ops. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100122155536.056430539@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2c22ce4fa784..33c889ff21ae 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1245,9 +1245,9 @@ static inline int is_x86_event(struct perf_event *event) static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { - int i, j, w, num, wmax; struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + int i, j, w, wmax, num = 0; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); @@ -1260,7 +1260,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) /* * fastpath, try to reuse previous register */ - for (i = 0, num = n; i < n; i++, num--) { + for (i = 0; i < n; i++) { hwc = &cpuc->event_list[i]->hw; c = constraints[i]; @@ -1288,7 +1288,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (assign) assign[i] = hwc->idx; } - if (!num) + if (i == n) goto done; /* -- cgit v1.2.3-58-ga151 From 6c9687abeb24d5b7aae7db5be070c2139ad29e29 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Jan 2010 11:57:25 +0100 Subject: perf_event: x86: Optimize x86_pmu_disable() x86_pmu_disable() removes the event from the cpuc->event_list[], however since an event can only be on that list once, stop looking after we found it. 
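The one-line change below is safe because the list is duplicate-free, so the shift-down loop can stop as soon as the entry has been found and the tail compacted. Reduced to a plain array, the loop shape looks like this (illustrative only; the real function walks cpuc->event_list[]):

#include <stdio.h>

/* remove the first (and only) occurrence of 'val', compacting the tail */
static int list_remove(int *list, int n, int val)
{
	int i;

	for (i = 0; i < n; i++) {
		if (list[i] == val) {
			while (++i < n)
				list[i - 1] = list[i];
			--n;
			break;	/* the event can only be on the list once */
		}
	}
	return n;
}

int main(void)
{
	int events[] = { 10, 20, 30, 40 };
	int n = 4, i;

	n = list_remove(events, n, 20);
	for (i = 0; i < n; i++)
		printf("%d ", events[i]);
	printf("\n");		/* prints: 10 30 40 */
	return 0;
}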
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 33c889ff21ae..66de282ad2fb 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1884,6 +1884,7 @@ static void x86_pmu_disable(struct perf_event *event) cpuc->event_list[i-1] = cpuc->event_list[i]; --cpuc->n_events; + break; } } perf_event_update_userpage(event); -- cgit v1.2.3-58-ga151 From 184f412c3341cd24fbd26604634a5800b83dbdc3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Jan 2010 08:39:39 +0100 Subject: perf, x86: Clean up event constraints code a bit - Remove stray debug code - Improve ugly macros a bit - Remove some whitespace damage - (Also fix up some accumulated damage in perf_event.h) Signed-off-by: Ingo Molnar Cc: Stephane Eranian Cc: Peter Zijlstra LKML-Reference: --- arch/x86/kernel/cpu/perf_event.c | 37 ++++++++----------------------------- include/linux/perf_event.h | 24 +++++++++++------------- 2 files changed, 19 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 66de282ad2fb..fdbe24842271 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -93,24 +93,19 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ }; -#define EVENT_CONSTRAINT(c, n, m) { \ +#define EVENT_CONSTRAINT(c, n, m) { \ { .idxmsk64[0] = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = HWEIGHT64((u64)(n)), \ } -#define INTEL_EVENT_CONSTRAINT(c, n) \ - EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) +#define INTEL_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) +#define FIXED_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) -#define FIXED_EVENT_CONSTRAINT(c, n) \ - EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) +#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0) -#define EVENT_CONSTRAINT_END \ - EVENT_CONSTRAINT(0, 0, 0) - -#define for_each_event_constraint(e, c) \ - for ((e) = (c); (e)->cmask; (e)++) +#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++) /* * struct x86_pmu - generic x86 pmu @@ -1276,14 +1271,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (test_bit(hwc->idx, used_mask)) break; -#if 0 - pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n", - smp_processor_id(), - hwc->config, - hwc->idx, - assign ? 'y' : 'n'); -#endif - set_bit(hwc->idx, used_mask); if (assign) assign[i] = hwc->idx; @@ -1333,14 +1320,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (j == X86_PMC_IDX_MAX) break; -#if 0 - pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n", - smp_processor_id(), - hwc->config, - j, - assign ? 'y' : 'n'); -#endif - set_bit(j, used_mask); if (assign) @@ -2596,9 +2575,9 @@ static const struct pmu pmu = { * validate a single event group * * validation include: - * - check events are compatible which each other - * - events do not compete for the same counter - * - number of events <= number of counters + * - check events are compatible which each other + * - events do not compete for the same counter + * - number of events <= number of counters * * validation ensures the group can be loaded onto the * PMU if it was the only group available. 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 72b2615600d8..953c17731e0d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -290,7 +290,7 @@ struct perf_event_mmap_page { }; #define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) -#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) #define PERF_RECORD_MISC_KERNEL (1 << 0) #define PERF_RECORD_MISC_USER (2 << 0) #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) @@ -356,8 +356,8 @@ enum perf_event_type { * u64 stream_id; * }; */ - PERF_RECORD_THROTTLE = 5, - PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, /* * struct { @@ -371,10 +371,10 @@ enum perf_event_type { /* * struct { - * struct perf_event_header header; - * u32 pid, tid; + * struct perf_event_header header; + * u32 pid, tid; * - * struct read_format values; + * struct read_format values; * }; */ PERF_RECORD_READ = 8, @@ -412,7 +412,7 @@ enum perf_event_type { * char data[size];}&& PERF_SAMPLE_RAW * }; */ - PERF_RECORD_SAMPLE = 9, + PERF_RECORD_SAMPLE = 9, PERF_RECORD_MAX, /* non-ABI */ }; @@ -752,8 +752,7 @@ extern int perf_max_events; extern const struct pmu *hw_perf_event_init(struct perf_event *event); extern void perf_event_task_sched_in(struct task_struct *task); -extern void perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next); +extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); extern void perf_event_task_tick(struct task_struct *task); extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); @@ -853,8 +852,7 @@ extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_sample_rate; extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, - void *record, int entry_size); +extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags @@ -895,13 +893,13 @@ static inline void perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { } static inline void -perf_bp_event(struct perf_event *event, void *data) { } +perf_bp_event(struct perf_event *event, void *data) { } static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } -static inline int perf_swevent_get_recursion_context(void) { return -1; } +static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } -- cgit v1.2.3-58-ga151 From 2e8418736dff9c6fdadb2f87dcc2087cebf32167 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Jan 2010 15:58:43 +0100 Subject: perf_event: x86: Deduplicate the disable code Share the meat of the x86_pmu_disable() code with hw_perf_enable(). Also remove the barrier() from that code, since I could not convince myself we actually need it. 
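The deduplication below works by giving the shared teardown a double-underscore helper that takes the cpu_hw_events pointer as an explicit argument, so both the public disable path and the rescheduling loop in hw_perf_enable() can call it. In miniature, the refactor shape is (names and the counter model are invented for illustration):

#include <stdio.h>

struct ctx {
	int	active[4];
	int	n_active;
};

/* shared meat: takes the context explicitly so any caller can reuse it */
static void __counter_disable(struct ctx *ctx, int idx)
{
	ctx->active[idx] = 0;
	printf("counter %d disabled\n", idx);
}

/* public entry point: bookkeeping plus the shared helper */
static void counter_disable(struct ctx *ctx, int idx)
{
	__counter_disable(ctx, idx);
	ctx->n_active--;
}

/* a second caller (think hw_perf_enable() moving an event) reuses it */
static void reschedule(struct ctx *ctx, int from, int to)
{
	__counter_disable(ctx, from);
	ctx->active[to] = 1;
}

int main(void)
{
	struct ctx c = { .active = { 1, 1, 0, 0 }, .n_active = 2 };

	counter_disable(&c, 0);
	reschedule(&c, 1, 2);
	return 0;
}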
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fdbe24842271..07fa0c2faa09 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1401,6 +1401,8 @@ static inline void x86_assign_hw_event(struct perf_event *event, } } +static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc); + void hw_perf_enable(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -1426,13 +1428,7 @@ void hw_perf_enable(void) if (hwc->idx == -1 || hwc->idx == cpuc->assign[i]) continue; - x86_pmu.disable(hwc, hwc->idx); - - clear_bit(hwc->idx, cpuc->active_mask); - barrier(); - cpuc->events[hwc->idx] = NULL; - - x86_perf_event_update(event, hwc, hwc->idx); + __x86_pmu_disable(event, cpuc); hwc->idx = -1; } @@ -1822,11 +1818,10 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) event->pending_kill = POLL_IN; } -static void x86_pmu_disable(struct perf_event *event) +static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int i, idx = hwc->idx; + int idx = hwc->idx; /* * Must be done before we disable, otherwise the nmi handler @@ -1835,12 +1830,6 @@ static void x86_pmu_disable(struct perf_event *event) clear_bit(idx, cpuc->active_mask); x86_pmu.disable(hwc, idx); - /* - * Make sure the cleared pointer becomes visible before we - * (potentially) free the event: - */ - barrier(); - /* * Drain the remaining delta count out of a event * that we are disabling: @@ -1852,6 +1841,14 @@ static void x86_pmu_disable(struct perf_event *event) intel_pmu_drain_bts_buffer(cpuc); cpuc->events[idx] = NULL; +} + +static void x86_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int i; + + __x86_pmu_disable(event, cpuc); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) { -- cgit v1.2.3-58-ga151 From ed8777fc132e589d48a0ba854fdbb5d8203b58e5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Jan 2010 23:07:46 +0100 Subject: perf_events, x86: Fix event constraint masks Since constraints are specified on the event number, not number and unit mask shorten the constraint masks so that we'll actually match something. 
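The mask change below matters because constraint matching is nothing more than a masked compare against the raw config word, (config & cmask) == code. If cmask covers the unit-mask byte while the constraint codes carry only the 8-bit event select, the compare can never succeed. A sketch of both cases (bit layout follows the architectural EVTSEL format, event select in bits 0-7 and unit mask in bits 8-15; the 0x51/0x0151 values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define ARCH_EVTSEL_MASK	0x000000ffULL	/* event select byte */
#define ARCH_UNIT_MASK		0x0000ff00ULL	/* unit mask byte    */

struct constraint {
	uint64_t	code;	/* bare event select, e.g. 0x51 */
	uint64_t	cmask;	/* which config bits participate */
};

static int matches(const struct constraint *c, uint64_t config)
{
	return (config & c->cmask) == c->code;
}

int main(void)
{
	uint64_t config = 0x0151;	/* umask 0x01, event select 0x51 */

	/* broken: mask includes the unit-mask byte the code never carries */
	struct constraint broken = { 0x51, ARCH_EVTSEL_MASK | ARCH_UNIT_MASK };
	/* fixed: compare only the event-select byte */
	struct constraint fixed  = { 0x51, ARCH_EVTSEL_MASK };

	printf("broken mask matches: %d\n", matches(&broken, config)); /* 0 */
	printf("fixed  mask matches: %d\n", matches(&fixed, config));  /* 1 */
	return 0;
}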
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100127221121.967610372@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 2 +- arch/x86/kernel/cpu/perf_event.c | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index dbc082685d52..ff5ede128bae 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -49,7 +49,7 @@ INTEL_ARCH_INV_MASK| \ INTEL_ARCH_EDGE_MASK|\ INTEL_ARCH_UNIT_MASK|\ - INTEL_ARCH_EVENT_MASK) + INTEL_ARCH_EVTSEL_MASK) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 07fa0c2faa09..951213a51489 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -100,12 +100,17 @@ struct cpu_hw_events { .weight = HWEIGHT64((u64)(n)), \ } -#define INTEL_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) -#define FIXED_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) +#define INTEL_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) -#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0) +#define FIXED_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) -#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++) +#define EVENT_CONSTRAINT_END \ + EVENT_CONSTRAINT(0, 0, 0) + +#define for_each_event_constraint(e, c) \ + for ((e) = (c); (e)->cmask; (e)++) /* * struct x86_pmu - generic x86 pmu -- cgit v1.2.3-58-ga151 From 1a6e21f791fe85b40a9ddbafe999ab8ccffc3f78 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Jan 2010 23:07:47 +0100 Subject: perf_events, x86: Clean up hw_perf_*_all() implementation Put the recursion avoidance code in the generic hook instead of replicating it in each implementation. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100127221122.057507285@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 59 ++++++++++------------------------------ 1 file changed, 14 insertions(+), 45 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 951213a51489..cf10839f20ea 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event) static void p6_pmu_disable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - barrier(); - /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - barrier(); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) @@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void) struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - /* - * ensure we write the disable before we start disabling the - * events proper, so that amd_pmu_enable_event() does the - * right thing. 
- */ - barrier(); - for (idx = 0; idx < x86_pmu.num_events; idx++) { u64 val; @@ -1166,23 +1142,20 @@ void hw_perf_disable(void) if (!x86_pmu_initialized()) return; - if (cpuc->enabled) - cpuc->n_added = 0; + if (!cpuc->enabled) + return; + + cpuc->n_added = 0; + cpuc->enabled = 0; + barrier(); x86_pmu.disable_all(); } static void p6_pmu_enable_all(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long val; - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); val |= ARCH_PERFMON_EVENTSEL0_ENABLE; @@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { @@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void) struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - for (idx = 0; idx < x86_pmu.num_events; idx++) { struct perf_event *event = cpuc->events[idx]; u64 val; @@ -1417,6 +1378,10 @@ void hw_perf_enable(void) if (!x86_pmu_initialized()) return; + + if (cpuc->enabled) + return; + if (cpuc->n_added) { /* * apply assignment obtained either from @@ -1461,6 +1426,10 @@ void hw_perf_enable(void) cpuc->n_added = 0; perf_events_lapic_init(); } + + cpuc->enabled = 1; + barrier(); + x86_pmu.enable_all(); } -- cgit v1.2.3-58-ga151 From 452a339a976e7f782c786eb3f73080401e2fa3a6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Jan 2010 23:07:48 +0100 Subject: perf_events, x86: Implement Intel Westmere support The new Intel documentation includes Westmere arch specific event maps that are significantly different from the Nehalem ones. Add support for this generation. Found the CPUID model numbers on wikipedia. Also ammend some Nehalem constraints, spotted those when looking for the differences between Nehalem and Westmere. Signed-off-by: Peter Zijlstra Cc: Arjan van de Ven Cc: "H. 
Peter Anvin" Cc: Stephane Eranian LKML-Reference: <20100127221122.151865645@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 124 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 117 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index cf10839f20ea..3fac0bfc2dee 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -244,18 +244,26 @@ static struct event_constraint intel_core_event_constraints[] = static struct event_constraint intel_nehalem_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ - FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ + FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ + INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */ INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ - INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ - INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ - INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ - INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ + INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_westmere_event_constraints[] = +{ + FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ + FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ + INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ + INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ EVENT_CONSTRAINT_END }; @@ -286,6 +294,97 @@ static u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; +static __initconst u64 westmere_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ + [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ + [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ + [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ + [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ + [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { 
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ + [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ + [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ + [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + static __initconst u64 nehalem_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -2423,7 +2522,9 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_core_event_constraints; pr_cont("Core2 events, "); break; - case 26: + + case 26: /* 45 nm nehalem, "Bloomfield" */ + case 30: /* 45 nm nehalem, "Lynnfield" */ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2437,6 +2538,15 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_gen_event_constraints; pr_cont("Atom events, "); break; + + case 37: /* 32 nm nehalem, "Clarkdale" */ + case 44: /* 32 nm nehalem, "Gulftown" */ + memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = intel_westmere_event_constraints; + pr_cont("Westmere events, "); + break; default: /* * default constraints for v2 and up -- cgit v1.2.3-58-ga151 From 18c01f8abff51e4910cc5ffb4b710e8c6eea60c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Jan 2010 23:07:49 +0100 Subject: perf_events, x86: Remove spurious counter reset from x86_pmu_enable() At enable time the counter might still have a ->idx pointing to a previously occupied location that might now be taken by another event. Resetting the counter at that location with data from this event will destroy the other counter's count. Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <20100127221122.261477183@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3fac0bfc2dee..518eb3e39577 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1762,9 +1762,6 @@ static int x86_pmu_enable(struct perf_event *event) cpuc->n_events = n; cpuc->n_added = n - n0; - if (hwc->idx != -1) - x86_perf_event_set_period(event, hwc, hwc->idx); - return 0; } -- cgit v1.2.3-58-ga151 From 75c9f3284a7ff957829f44baace82406a6354ceb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 Jan 2010 09:04:26 +0100 Subject: perf_events: Fix sample_period transfer on inherit One problem with frequency driven counters is that we cannot predict the rate at which they trigger, therefore we have to start them at period=1, this causes a ramp up effect. However, if we fail to propagate the stable state on fork each new child will have to ramp up again. 
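To see why the fix below matters, consider a toy model of frequency-driven sampling in which the period is re-estimated once per tick as observed_rate / target_frequency (a deliberately simplified stand-in, not the kernel's estimator). A child that restarts at period = 1 floods its first tick with samples, while a child that inherits the parent's converged period samples at the intended rate from the start:

#include <stdio.h>
#include <stdint.h>

static void run(const char *tag, uint64_t start_period)
{
	const uint64_t rate = 1000000;	/* events per tick (assumed steady) */
	const uint64_t freq = 100;	/* wanted samples per tick          */
	uint64_t period = start_period;
	int tick;

	for (tick = 1; tick <= 3; tick++) {
		uint64_t samples = rate / period;

		printf("%s tick %d: period=%llu -> %llu samples\n",
		       tag, tick, (unsigned long long)period,
		       (unsigned long long)samples);
		period = rate / freq;	/* re-estimate for the next tick */
	}
}

int main(void)
{
	run("fresh child    ", 1);	/* 1000000 samples in the first tick */
	run("inherited child", 10000);	/* steady ~100 samples per tick      */
	return 0;
}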
This can lead to significant artifacts in sample data. Signed-off-by: Peter Zijlstra Cc: eranian@google.com Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1264752266.4283.2121.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 251fb9552492..53dc2a362111 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5002,8 +5002,15 @@ inherit_event(struct perf_event *parent_event, else child_event->state = PERF_EVENT_STATE_OFF; - if (parent_event->attr.freq) - child_event->hw.sample_period = parent_event->hw.sample_period; + if (parent_event->attr.freq) { + u64 sample_period = parent_event->hw.sample_period; + struct hw_perf_event *hwc = &child_event->hw; + + hwc->sample_period = sample_period; + hwc->last_period = sample_period; + + atomic64_set(&hwc->period_left, sample_period); + } child_event->overflow_handler = parent_event->overflow_handler; -- cgit v1.2.3-58-ga151 From 72b8fa1730207274f6818b47b891ce5dff79287e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 27 Jan 2010 21:05:49 -0200 Subject: perf top: Exit if specified --vmlinux can't be used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we do lazy loading of symtabs we only will know if the specified vmlinux file is invalid when we actually have a hit in kernel space and then try to load it. So if we get kernel hits and there are _no_ symbols in the DSO backing the kernel map, bail out. Reported-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264633557-17597-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 2227b84aa002..78f9c4576a0c 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -951,9 +951,31 @@ static void event__process_sample(const event_t *self, } if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 || - al.sym == NULL || al.filtered) + al.filtered) return; + if (al.sym == NULL) { + /* + * As we do lazy loading of symtabs we only will know if the + * specified vmlinux file is invalid when we actually have a + * hit in kernel space and then try to load it. So if we get + * here and there are _no_ symbols in the DSO backing the + * kernel map, bail out. + * + * We may never get here, for instance, if we use -K/ + * --hide-kernel-symbols, even if the user specifies an + * invalid --vmlinux ;-) + */ + if (al.map == session->vmlinux_maps[MAP__FUNCTION] && + RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { + pr_err("The %s file can't be used\n", + symbol_conf.vmlinux_name); + exit(1); + } + + return; + } + syme = symbol__priv(al.sym); if (!syme->skip) { syme->count[counter]++; -- cgit v1.2.3-58-ga151 From a19afe46412452fef89cc623873a8931b3685944 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 27 Jan 2010 21:05:50 -0200 Subject: perf symbols: Factor out dso__load_vmlinux_path() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that we can call it directly from regression tests, and also to reduce the size of dso__load_kernel_sym(), making it more clear. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264633557-17597-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 38 ++++++++++++++++++++++++-------------- tools/perf/util/symbol.h | 2 ++ 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index f1f609dcf9a1..26ec603083e0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1578,6 +1578,27 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return err; } +int dso__load_vmlinux_path(struct dso *self, struct map *map, + struct perf_session *session, symbol_filter_t filter) +{ + int i, err = 0; + + pr_debug("Looking at the vmlinux_path (%d entries long)\n", + vmlinux_path__nr_entries); + + for (i = 0; i < vmlinux_path__nr_entries; ++i) { + err = dso__load_vmlinux(self, map, session, vmlinux_path[i], + filter); + if (err > 0) { + pr_debug("Using %s for symbols\n", vmlinux_path[i]); + dso__set_long_name(self, strdup(vmlinux_path[i])); + break; + } + } + + return err; +} + static int dso__load_kernel_sym(struct dso *self, struct map *map, struct perf_session *session, symbol_filter_t filter) { @@ -1606,20 +1627,9 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } if (vmlinux_path != NULL) { - int i; - pr_debug("Looking at the vmlinux_path (%d entries long)\n", - vmlinux_path__nr_entries); - for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(self, map, session, - vmlinux_path[i], filter); - if (err > 0) { - pr_debug("Using %s for symbols\n", - vmlinux_path[i]); - dso__set_long_name(self, - strdup(vmlinux_path[i])); - goto out_fixup; - } - } + err = dso__load_vmlinux_path(self, map, session, filter); + if (err > 0) + goto out_fixup; } /* diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ffe0b0f2e5d3..a94997aeb334 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -129,6 +129,8 @@ struct perf_session; int dso__load(struct dso *self, struct map *map, struct perf_session *session, symbol_filter_t filter); +int dso__load_vmlinux_path(struct dso *self, struct map *map, + struct perf_session *session, symbol_filter_t filter); void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp, bool with_hits); -- cgit v1.2.3-58-ga151 From fd1d908c543fbdfae82839d24b0872c542fceedc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 27 Jan 2010 21:05:51 -0200 Subject: perf symbols: Split helpers used when creating kernel dso object MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To make it clear and allow for direct usage by, for instance, regression test suites. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264633557-17597-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 28 +++++++++++++++++++++------- tools/perf/util/symbol.h | 2 ++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 26ec603083e0..f9049d12ead6 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1762,24 +1762,38 @@ size_t dsos__fprintf_buildid(FILE *fp, bool with_hits) __dsos__fprintf_buildid(&dsos__user, fp, with_hits)); } +struct dso *dso__new_kernel(const char *name) +{ + struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); + + if (self != NULL) { + self->short_name = "[kernel]"; + self->kernel = 1; + } + + return self; +} + +void dso__read_running_kernel_build_id(struct dso *self) +{ + if (sysfs__read_build_id("/sys/kernel/notes", self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; +} + static struct dso *dsos__create_kernel(const char *vmlinux) { - struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]"); + struct dso *kernel = dso__new_kernel(vmlinux); if (kernel == NULL) return NULL; - kernel->short_name = "[kernel]"; - kernel->kernel = 1; - vdso = dso__new("[vdso]"); if (vdso == NULL) goto out_delete_kernel_dso; dso__set_loaded(vdso, MAP__FUNCTION); - if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, - sizeof(kernel->build_id)) == 0) - kernel->has_build_id = true; + dso__read_running_kernel_build_id(kernel); dsos__add(&dsos__kernel, kernel); dsos__add(&dsos__user, vdso); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index a94997aeb334..124302778c09 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -109,6 +109,7 @@ struct dso { }; struct dso *dso__new(const char *name); +struct dso *dso__new_kernel(const char *name); void dso__delete(struct dso *self); bool dso__loaded(const struct dso *self, enum map_type type); @@ -139,6 +140,7 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); char dso__symtab_origin(const struct dso *self); void dso__set_long_name(struct dso *self, char *name); void dso__set_build_id(struct dso *self, void *build_id); +void dso__read_running_kernel_build_id(struct dso *self); struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, const char *name); -- cgit v1.2.3-58-ga151 From 64abebf731df87e6f4ae7d9ffc340bdf0c033e44 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 27 Jan 2010 21:05:52 -0200 Subject: perf session: Create kernel maps in the constructor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removing one extra step needed in the tools that need this, fixing a bug in 'perf probe' where this was not being done. 
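The change below moves a step that every writer-mode tool needs into the session constructor itself, which is how the 'perf probe' omission gets fixed for free. As a design pattern it reduces to: required setup lives in the allocation routine, not in each caller (sketch with invented names):

#include <stdio.h>
#include <stdlib.h>

struct session {
	int	write_mode;
	int	kernel_maps_ready;
};

/* constructor performs the mandatory setup so no caller can forget it */
static struct session *session_new(int write_mode)
{
	struct session *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;

	s->write_mode = write_mode;
	if (write_mode)
		s->kernel_maps_ready = 1;	/* was previously per-tool */
	return s;
}

int main(void)
{
	struct session *s = session_new(1);

	if (!s)
		return 1;
	printf("kernel maps ready: %d\n", s->kernel_maps_ready);
	free(s);
	return 0;
}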
Signed-off-by: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1264633557-17597-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 5 ----- tools/perf/builtin-record.c | 5 ----- tools/perf/builtin-top.c | 5 ----- tools/perf/util/session.c | 13 +++++++++++-- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 7323d9dfbce8..38b8ca900eda 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -491,11 +491,6 @@ static int __cmd_kmem(void) if (!perf_session__has_traces(session, "kmem record")) goto out_delete; - if (perf_session__create_kernel_maps(session) < 0) { - pr_err("Problems creating kernel maps\n"); - return -1; - } - setup_pager(); err = perf_session__process_events(session, &event_ops); if (err != 0) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 7bb9ca1b30fa..90345223908c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -477,11 +477,6 @@ static int __cmd_record(int argc, const char **argv) return -1; } - if (perf_session__create_kernel_maps(session) < 0) { - pr_err("Problems creating kernel maps\n"); - return -1; - } - if (!file_new) { err = perf_header__read(&session->header, output); if (err < 0) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 78f9c4576a0c..1fc018e048e1 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1191,11 +1191,6 @@ static int __cmd_top(void) if (session == NULL) return -ENOMEM; - if (perf_session__create_kernel_maps(session) < 0) { - pr_err("Problems creating kernel maps\n"); - return -1; - } - if (target_pid != -1) event__synthesize_thread(target_pid, event__process, session); else diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 1951e330377c..8e7c1896eaa2 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -70,8 +70,17 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc self->unknown_events = 0; map_groups__init(&self->kmaps); - if (mode == O_RDONLY && perf_session__open(self, force) < 0) - goto out_delete; + if (mode == O_RDONLY) { + if (perf_session__open(self, force) < 0) + goto out_delete; + } else if (mode == O_WRONLY) { + /* + * In O_RDONLY mode this will be performed when reading the + * kernel MMAP event, in event__process_mmap(). 
+ */ + if (perf_session__create_kernel_maps(self) < 0) + goto out_delete; + } self->sample_type = perf_header__sample_type(&self->header); out: -- cgit v1.2.3-58-ga151 From 4c574159d03f4d8a136a7adff2d0b1d82cadcb18 Mon Sep 17 00:00:00 2001 From: Thiago Farina Date: Wed, 27 Jan 2010 21:05:55 -0200 Subject: tools/perf/perf.c: Clean up trivial style issues Checked with: ./../scripts/checkpatch.pl --terse --file perf.c perf.c: 51: ERROR: open brace '{' following function declarations go on the next line perf.c: 73: ERROR: "foo*** bar" should be "foo ***bar" perf.c:112: ERROR: space prohibited before that close parenthesis ')' perf.c:127: ERROR: space prohibited before that close parenthesis ')' perf.c:171: ERROR: "foo** bar" should be "foo **bar" perf.c:213: ERROR: "(foo*)" should be "(foo *)" perf.c:216: ERROR: "(foo*)" should be "(foo *)" perf.c:217: ERROR: space required before that '*' (ctx:OxV) perf.c:452: ERROR: do not initialise statics to 0 or NULL perf.c:453: ERROR: do not initialise statics to 0 or NULL Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Masami Hiramatsu LKML-Reference: <1264633557-17597-7-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 05c861c045d5..109b89b30ced 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -48,7 +48,8 @@ int check_pager_config(const char *cmd) return c.val; } -static void commit_pager_choice(void) { +static void commit_pager_choice(void) +{ switch (use_pager) { case 0: setenv("PERF_PAGER", "cat", 1); @@ -70,7 +71,7 @@ static void set_debugfs_path(void) "tracing/events"); } -static int handle_options(const char*** argv, int* argc, int* envchanged) +static int handle_options(const char ***argv, int *argc, int *envchanged) { int handled = 0; @@ -109,7 +110,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--perf-dir")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --perf-dir.\n" ); + fprintf(stderr, "No directory given for --perf-dir.\n"); usage(perf_usage_string); } setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1); @@ -124,7 +125,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --work-tree.\n" ); + fprintf(stderr, "No directory given for --work-tree.\n"); usage(perf_usage_string); } setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); @@ -168,7 +169,7 @@ static int handle_alias(int *argcp, const char ***argv) { int envchanged = 0, ret = 0, saved_errno = errno; int count, option_count; - const char** new_argv; + const char **new_argv; const char *alias_command; char *alias_string; @@ -210,11 +211,11 @@ static int handle_alias(int *argcp, const char ***argv) if (!strcmp(alias_command, new_argv[0])) die("recursive alias: %s", alias_command); - new_argv = realloc(new_argv, sizeof(char*) * + new_argv = realloc(new_argv, sizeof(char *) * (count + *argcp + 1)); /* insert after command name */ - memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp); - new_argv[count+*argcp] = NULL; + memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp); + new_argv[count + *argcp] = NULL; *argv = new_argv; *argcp += count - 1; @@ -450,8 +451,8 @@ int main(int argc, const char **argv) setup_path(); while (1) { 
- static int done_help = 0; - static int was_alias = 0; + static int done_help; + static int was_alias; was_alias = run_argv(&argc, &argv); if (errno != ENOENT) -- cgit v1.2.3-58-ga151 From 6a1b751fb89b61ef7240f2e3ed65a2e2776e7cfd Mon Sep 17 00:00:00 2001 From: John Kacur Date: Wed, 27 Jan 2010 21:05:54 -0200 Subject: perf: Ignore perf-archive temp file Tell git to ignore perf-archive. Signed-off-by: John Kacur Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1264633557-17597-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 124760bb37b5..e1d60d780784 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -14,6 +14,7 @@ perf*.html common-cmds.h perf.data perf.data.old +perf-archive tags TAGS cscope* -- cgit v1.2.3-58-ga151 From a8e6f734ce9a79d44ebb296f2a341f435227b34e Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 30 Jan 2010 20:55:41 +0900 Subject: Revert "perf record: Intercept all events" This reverts commit f5a2c3dce03621b55f84496f58adc2d1a87ca16f. This patch is required for making "perf lock rec" work. The commit f5a2c3dce0 changes write_event() of builtin-record.c . And changed write_event() sometimes doesn't stop with perf lock rec. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: [ that commit also causes perf record to not be Ctrl-C-able, and it's concetually wrong to parse the data at record time (unconditionally - even when not needed), as we eventually want to be able to do zero-copy recording, at least for non-archive recordings. ] Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 90345223908c..eea56910b91c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -113,24 +113,16 @@ static void write_output(void *buf, size_t size) static void write_event(event_t *buf, size_t size) { - size_t processed_size = buf->header.size; - event_t *ev = buf; - - do { - /* - * Add it to the list of DSOs, so that when we finish this - * record session we can pick the available build-ids. - */ - if (ev->header.type == PERF_RECORD_MMAP) { - struct list_head *head = &dsos__user; - if (ev->header.misc == 1) - head = &dsos__kernel; - __dsos__findnew(head, ev->mmap.filename); - } - - ev = ((void *)ev) + ev->header.size; - processed_size += ev->header.size; - } while (processed_size < size); + /* + * Add it to the list of DSOs, so that when we finish this + * record session we can pick the available build-ids. + */ + if (buf->header.type == PERF_RECORD_MMAP) { + struct list_head *head = &dsos__user; + if (buf->mmap.header.misc == 1) + head = &dsos__kernel; + __dsos__findnew(head, buf->mmap.filename); + } write_output(buf, size); } -- cgit v1.2.3-58-ga151 From 86d8d29634de4464d568e7c335c0da6cba64e8ab Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 30 Jan 2010 20:43:23 +0900 Subject: perf tools: Add __data_loc support This patch is required to test the next patch for perf lock. At 064739bc4b3d7f424b2f25547e6611bcf0132415 , support for the modifier "__data_loc" of format is added. But, when I wanted to parse format of lock_acquired (or some event else), raw_field_ptr() did not returned correct pointer. 
So I modified raw_field_ptr() like this patch. Then raw_field_ptr() works well. Signed-off-by: Hitoshi Mitake Acked-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Tom Zanussi Cc: Steven Rostedt LKML-Reference: <1264851813-8413-2-git-send-email-mitake@dcl.info.waseda.ac.jp> [ v3: fixed minor stylistic detail ] Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index c5c32be040bf..c4b3cb8a02b1 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1925,6 +1925,15 @@ void *raw_field_ptr(struct event *event, const char *name, void *data) if (!field) return NULL; + if (field->flags & FIELD_IS_STRING) { + int offset; + + offset = *(int *)(data + field->offset); + offset &= 0xffff; + + return data + offset; + } + return data + field->offset; } -- cgit v1.2.3-58-ga151 From 18e97e06b5fb2d7f6cf272ca07d26d8247db8723 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 30 Jan 2010 20:43:24 +0900 Subject: perf: Add util/include/linuxhash.h to include hash.h of kernel linux/hash.h, hash header of kernel, is also useful for perf. util/include/linuxhash.h includes linux/hash.h, so we can use hash facilities (e.g. hash_long()) in perf now. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1264851813-8413-3-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/include/linux/hash.h | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 tools/perf/util/include/linux/hash.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 9b173e66fb41..b2bce1fb4ae1 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -333,6 +333,7 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h +LIB_H += ../../include/linux/hash.h LIB_H += ../../include/linux/stringify.h LIB_H += util/include/linux/bitmap.h LIB_H += util/include/linux/bitops.h diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h new file mode 100644 index 000000000000..201f57397997 --- /dev/null +++ b/tools/perf/util/include/linux/hash.h @@ -0,0 +1,5 @@ +#include "../../../../include/linux/hash.h" + +#ifndef PERF_HASH_H +#define PERF_HASH_H +#endif -- cgit v1.2.3-58-ga151 From c965be10ca3cb0bdd04016c852764afaf8e647c8 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 30 Jan 2010 20:43:32 +0900 Subject: perf lock: Enhance information of lock trace events Add wait time and lock identification details. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1264851813-8413-11-git-send-email-mitake@dcl.info.waseda.ac.jp> [ removed the file/line bits as we can do that better via IPs ] Signed-off-by: Ingo Molnar --- include/trace/events/lock.h | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h index a870ba125aa8..5c1dcfc16c60 100644 --- a/include/trace/events/lock.h +++ b/include/trace/events/lock.h @@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire, TP_STRUCT__entry( __field(unsigned int, flags) __string(name, lock->name) + __field(void *, lockdep_addr) ), TP_fast_assign( __entry->flags = (trylock ? 1 : 0) | (read ? 
2 : 0); __assign_str(name, lock->name); + __entry->lockdep_addr = lock; ), - TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "", + TP_printk("%p %s%s%s", __entry->lockdep_addr, + (__entry->flags & 1) ? "try " : "", (__entry->flags & 2) ? "read " : "", __get_str(name)) ); @@ -40,13 +43,16 @@ TRACE_EVENT(lock_release, TP_STRUCT__entry( __string(name, lock->name) + __field(void *, lockdep_addr) ), TP_fast_assign( __assign_str(name, lock->name); + __entry->lockdep_addr = lock; ), - TP_printk("%s", __get_str(name)) + TP_printk("%p %s", + __entry->lockdep_addr, __get_str(name)) ); #ifdef CONFIG_LOCK_STAT @@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended, TP_STRUCT__entry( __string(name, lock->name) + __field(void *, lockdep_addr) ), TP_fast_assign( __assign_str(name, lock->name); + __entry->lockdep_addr = lock; ), - TP_printk("%s", __get_str(name)) + TP_printk("%p %s", + __entry->lockdep_addr, __get_str(name)) ); TRACE_EVENT(lock_acquired, @@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired, TP_STRUCT__entry( __string(name, lock->name) - __field(unsigned long, wait_usec) - __field(unsigned long, wait_nsec_rem) + __field(s64, wait_nsec) + __field(void *, lockdep_addr) ), + TP_fast_assign( __assign_str(name, lock->name); - __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); - __entry->wait_usec = (unsigned long) waittime; + __entry->wait_nsec = waittime; + __entry->lockdep_addr = lock; ), - TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec, - __entry->wait_nsec_rem) + TP_printk("%p %s (%llu ns)", __entry->lockdep_addr, + __get_str(name), + __entry->wait_nsec) ); #endif -- cgit v1.2.3-58-ga151 From 9b5e350c7a46a471d5b452836dbafe9aeaeca435 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sat, 30 Jan 2010 20:43:33 +0900 Subject: perf lock: Introduce new tool "perf lock", for analyzing lock statistics Adding new subcommand "perf lock" to perf. I have a lot of remaining ToDos, but for now perf lock can already provide minimal functionality for analyzing lock statistics. 
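For orientation, a typical session with the new subcommand would look roughly like the following (the workload and the sort key are only illustrative, and the 'prof' subcommand is renamed to 'report' by a cleanup patch later in this series):

	# perf lock record sleep 1
	# perf lock prof -k wait_total
	# perf lock map

Here record traces the four lock:* events system-wide, prof prints per-lock statistics sorted by the chosen key, and map dumps the lockdep-address to name table collected from the trace.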
Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1264851813-8413-12-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/builtin-lock.c | 724 ++++++++++++++++++++++++++++++++++++++++++++++ tools/perf/builtin.h | 1 + tools/perf/perf.c | 1 + 4 files changed, 727 insertions(+) create mode 100644 tools/perf/builtin-lock.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b2bce1fb4ae1..42969303e20b 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -456,6 +456,7 @@ BUILTIN_OBJS += builtin-top.o BUILTIN_OBJS += builtin-trace.o BUILTIN_OBJS += builtin-probe.o BUILTIN_OBJS += builtin-kmem.o +BUILTIN_OBJS += builtin-lock.o PERFLIBS = $(LIB_FILE) diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c new file mode 100644 index 000000000000..2b5f88754c26 --- /dev/null +++ b/tools/perf/builtin-lock.c @@ -0,0 +1,724 @@ +#include "builtin.h" +#include "perf.h" + +#include "util/util.h" +#include "util/cache.h" +#include "util/symbol.h" +#include "util/thread.h" +#include "util/header.h" + +#include "util/parse-options.h" +#include "util/trace-event.h" + +#include "util/debug.h" +#include "util/session.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* based on kernel/lockdep.c */ +#define LOCKHASH_BITS 12 +#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) + +static struct list_head lockhash_table[LOCKHASH_SIZE]; + +#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) +#define lockhashentry(key) (lockhash_table + __lockhashfn((key))) + +#define LOCK_STATE_UNLOCKED 0 /* initial state */ +#define LOCK_STATE_LOCKED 1 + +struct lock_stat { + struct list_head hash_entry; + struct rb_node rb; /* used for sorting */ + + /* FIXME: raw_field_value() returns unsigned long long, + * so address of lockdep_map should be dealed as 64bit. + * Is there more better solution? */ + void *addr; /* address of lockdep_map, used as ID */ + char *name; /* for strcpy(), we cannot use const */ + char *file; + unsigned int line; + + int state; + u64 prev_event_time; /* timestamp of previous event */ + + unsigned int nr_acquired; + unsigned int nr_acquire; + unsigned int nr_contended; + unsigned int nr_release; + + /* these times are in nano sec. */ + u64 wait_time_total; + u64 wait_time_min; + u64 wait_time_max; +}; + +/* build simple key function one is bigger than two */ +#define SINGLE_KEY(member) \ + static int lock_stat_key_ ## member(struct lock_stat *one, \ + struct lock_stat *two) \ + { \ + return one->member > two->member; \ + } + +SINGLE_KEY(nr_acquired) +SINGLE_KEY(nr_contended) +SINGLE_KEY(wait_time_total) +SINGLE_KEY(wait_time_min) +SINGLE_KEY(wait_time_max) + +struct lock_key { + /* + * name: the value for specify by user + * this should be simpler than raw name of member + * e.g. 
nr_acquired -> acquired, wait_time_total -> wait_total + */ + const char *name; + int (*key)(struct lock_stat*, struct lock_stat*); +}; + +static const char *sort_key = "acquired"; +static int (*compare)(struct lock_stat *, struct lock_stat *); + +#define DEF_KEY_LOCK(name, fn_suffix) \ + { #name, lock_stat_key_ ## fn_suffix } +struct lock_key keys[] = { + DEF_KEY_LOCK(acquired, nr_acquired), + DEF_KEY_LOCK(contended, nr_contended), + DEF_KEY_LOCK(wait_total, wait_time_total), + DEF_KEY_LOCK(wait_min, wait_time_min), + DEF_KEY_LOCK(wait_max, wait_time_max), + + /* extra comparisons much complicated should be here */ + + { NULL, NULL } +}; + +static void select_key(void) +{ + int i; + + for (i = 0; keys[i].name; i++) { + if (!strcmp(keys[i].name, sort_key)) { + compare = keys[i].key; + return; + } + } + + die("Unknown compare key:%s\n", sort_key); +} + +static struct rb_root result; /* place to store sorted data */ + +static void insert_to_result(struct lock_stat *st, + int (*bigger)(struct lock_stat *, + struct lock_stat *)) +{ + struct rb_node **rb = &result.rb_node; + struct rb_node *parent = NULL; + struct lock_stat *p; + + while (*rb) { + p = container_of(*rb, struct lock_stat, rb); + parent = *rb; + + if (bigger(st, p)) + rb = &(*rb)->rb_left; + else + rb = &(*rb)->rb_right; + } + + rb_link_node(&st->rb, parent, rb); + rb_insert_color(&st->rb, &result); +} + +/* returns left most element of result, and erase it */ +static struct lock_stat *pop_from_result(void) +{ + struct rb_node *node = result.rb_node; + + if (!node) + return NULL; + + while (node->rb_left) + node = node->rb_left; + + rb_erase(node, &result); + return container_of(node, struct lock_stat, rb); +} + +static struct lock_stat *lock_stat_findnew(void *addr, const char *name, + const char *file, unsigned int line) +{ + struct list_head *entry = lockhashentry(addr); + struct lock_stat *ret, *new; + + list_for_each_entry(ret, entry, hash_entry) { + if (ret->addr == addr) + return ret; + } + + new = zalloc(sizeof(struct lock_stat)); + if (!new) + goto alloc_failed; + + new->addr = addr; + new->name = zalloc(sizeof(char) * strlen(name) + 1); + if (!new->name) + goto alloc_failed; + strcpy(new->name, name); + new->file = zalloc(sizeof(char) * strlen(file) + 1); + if (!new->file) + goto alloc_failed; + strcpy(new->file, file); + new->line = line; + + /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */ + new->state = LOCK_STATE_UNLOCKED; + new->wait_time_min = ULLONG_MAX; + + list_add(&new->hash_entry, entry); + return new; + +alloc_failed: + die("memory allocation failed\n"); +} + +static char const *input_name = "perf.data"; + +static int profile_cpu = -1; + +struct raw_event_sample { + u32 size; + char data[0]; +}; + +struct trace_acquire_event { + void *addr; + const char *name; + const char *file; + unsigned int line; +}; + +struct trace_acquired_event { + void *addr; + const char *name; + const char *file; + unsigned int line; +}; + +struct trace_contended_event { + void *addr; + const char *name; + const char *file; + unsigned int line; +}; + +struct trace_release_event { + void *addr; + const char *name; + const char *file; + unsigned int line; +}; + +struct trace_lock_handler { + void (*acquire_event)(struct trace_acquire_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); + + void (*acquired_event)(struct trace_acquired_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); + + void (*contended_event)(struct trace_contended_event *, + struct event *, + int cpu, 
+ u64 timestamp, + struct thread *thread); + + void (*release_event)(struct trace_release_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); +}; + +static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct lock_stat *st; + + st = lock_stat_findnew(acquire_event->addr, acquire_event->name, + acquire_event->file, acquire_event->line); + + switch (st->state) { + case LOCK_STATE_UNLOCKED: + break; + case LOCK_STATE_LOCKED: + break; + default: + BUG_ON(1); + break; + } + + st->prev_event_time = timestamp; +} + +static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct lock_stat *st; + + st = lock_stat_findnew(acquired_event->addr, acquired_event->name, + acquired_event->file, acquired_event->line); + + switch (st->state) { + case LOCK_STATE_UNLOCKED: + st->state = LOCK_STATE_LOCKED; + st->nr_acquired++; + break; + case LOCK_STATE_LOCKED: + break; + default: + BUG_ON(1); + break; + } + + st->prev_event_time = timestamp; +} + +static void prof_lock_contended_event(struct trace_contended_event *contended_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct lock_stat *st; + + st = lock_stat_findnew(contended_event->addr, contended_event->name, + contended_event->file, contended_event->line); + + switch (st->state) { + case LOCK_STATE_UNLOCKED: + break; + case LOCK_STATE_LOCKED: + st->nr_contended++; + break; + default: + BUG_ON(1); + break; + } + + st->prev_event_time = timestamp; +} + +static void prof_lock_release_event(struct trace_release_event *release_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct lock_stat *st; + u64 hold_time; + + st = lock_stat_findnew(release_event->addr, release_event->name, + release_event->file, release_event->line); + + switch (st->state) { + case LOCK_STATE_UNLOCKED: + break; + case LOCK_STATE_LOCKED: + st->state = LOCK_STATE_UNLOCKED; + hold_time = timestamp - st->prev_event_time; + + if (timestamp < st->prev_event_time) { + /* terribly, this can happen... */ + goto end; + } + + if (st->wait_time_min > hold_time) + st->wait_time_min = hold_time; + if (st->wait_time_max < hold_time) + st->wait_time_max = hold_time; + st->wait_time_total += hold_time; + + st->nr_release++; + break; + default: + BUG_ON(1); + break; + } + +end: + st->prev_event_time = timestamp; +} + +/* lock oriented handlers */ +/* TODO: handlers for CPU oriented, thread oriented */ +static struct trace_lock_handler prof_lock_ops = { + .acquire_event = prof_lock_acquire_event, + .acquired_event = prof_lock_acquired_event, + .contended_event = prof_lock_contended_event, + .release_event = prof_lock_release_event, +}; + +static struct trace_lock_handler *trace_handler; + +static void +process_lock_acquire_event(void *data, + struct event *event __used, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_acquire_event acquire_event; + u64 tmp; /* this is required for casting... 
*/ + + tmp = raw_field_value(event, "lockdep_addr", data); + memcpy(&acquire_event.addr, &tmp, sizeof(void *)); + acquire_event.name = (char *)raw_field_ptr(event, "name", data); + acquire_event.file = (char *)raw_field_ptr(event, "file", data); + acquire_event.line = + (unsigned int)raw_field_value(event, "line", data); + + if (trace_handler->acquire_event) { + trace_handler->acquire_event(&acquire_event, + event, cpu, timestamp, thread); + } +} + +static void +process_lock_acquired_event(void *data, + struct event *event __used, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_acquired_event acquired_event; + u64 tmp; /* this is required for casting... */ + + tmp = raw_field_value(event, "lockdep_addr", data); + memcpy(&acquired_event.addr, &tmp, sizeof(void *)); + acquired_event.name = (char *)raw_field_ptr(event, "name", data); + acquired_event.file = (char *)raw_field_ptr(event, "file", data); + acquired_event.line = + (unsigned int)raw_field_value(event, "line", data); + + if (trace_handler->acquire_event) { + trace_handler->acquired_event(&acquired_event, + event, cpu, timestamp, thread); + } +} + +static void +process_lock_contended_event(void *data, + struct event *event __used, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_contended_event contended_event; + u64 tmp; /* this is required for casting... */ + + tmp = raw_field_value(event, "lockdep_addr", data); + memcpy(&contended_event.addr, &tmp, sizeof(void *)); + contended_event.name = (char *)raw_field_ptr(event, "name", data); + contended_event.file = (char *)raw_field_ptr(event, "file", data); + contended_event.line = + (unsigned int)raw_field_value(event, "line", data); + + if (trace_handler->acquire_event) { + trace_handler->contended_event(&contended_event, + event, cpu, timestamp, thread); + } +} + +static void +process_lock_release_event(void *data, + struct event *event __used, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_release_event release_event; + u64 tmp; /* this is required for casting... 
*/ + + tmp = raw_field_value(event, "lockdep_addr", data); + memcpy(&release_event.addr, &tmp, sizeof(void *)); + release_event.name = (char *)raw_field_ptr(event, "name", data); + release_event.file = (char *)raw_field_ptr(event, "file", data); + release_event.line = + (unsigned int)raw_field_value(event, "line", data); + + if (trace_handler->acquire_event) { + trace_handler->release_event(&release_event, + event, cpu, timestamp, thread); + } +} + +static void +process_raw_event(void *data, int cpu, + u64 timestamp, struct thread *thread) +{ + struct event *event; + int type; + + type = trace_parse_common_type(data); + event = trace_find_event(type); + + if (!strcmp(event->name, "lock_acquire")) + process_lock_acquire_event(data, event, cpu, timestamp, thread); + if (!strcmp(event->name, "lock_acquired")) + process_lock_acquired_event(data, event, cpu, timestamp, thread); + if (!strcmp(event->name, "lock_contended")) + process_lock_contended_event(data, event, cpu, timestamp, thread); + if (!strcmp(event->name, "lock_release")) + process_lock_release_event(data, event, cpu, timestamp, thread); +} + +static int process_sample_event(event_t *event, struct perf_session *session) +{ + struct thread *thread; + struct sample_data data; + + bzero(&data, sizeof(struct sample_data)); + event__parse_sample(event, session->sample_type, &data); + thread = perf_session__findnew(session, data.pid); + + /* + * FIXME: this causes warn on 32bit environment + * because of (void *)data.ip (type of data.ip is u64) + */ +/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */ +/* event->header.misc, */ +/* data.pid, data.tid, (void *)data.ip, data.period); */ + + if (thread == NULL) { + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + if (profile_cpu != -1 && profile_cpu != (int) data.cpu) + return 0; + + process_raw_event(data.raw_data, data.cpu, data.time, thread); + + return 0; +} + +/* TODO: various way to print, coloring, nano or milli sec */ +static void print_result(void) +{ + struct lock_stat *st; + char cut_name[20]; + + printf("%18s ", "ID"); + printf("%20s ", "Name"); + printf("%10s ", "acquired"); + printf("%10s ", "contended"); + + printf("%15s ", "total wait (ns)"); + printf("%15s ", "max wait (ns)"); + printf("%15s ", "min wait (ns)"); + + printf("\n\n"); + + while ((st = pop_from_result())) { + bzero(cut_name, 20); + + printf("%p ", st->addr); + + if (strlen(st->name) < 16) { + /* output raw name */ + printf("%20s ", st->name); + } else { + strncpy(cut_name, st->name, 16); + cut_name[16] = '.'; + cut_name[17] = '.'; + cut_name[18] = '.'; + cut_name[19] = '\0'; + /* cut off name for saving output style */ + printf("%20s ", cut_name); + } + + printf("%10u ", st->nr_acquired); + printf("%10u ", st->nr_contended); + + printf("%15llu ", st->wait_time_total); + printf("%15llu ", st->wait_time_max); + printf("%15llu ", st->wait_time_min == ULLONG_MAX ? 
+ 0 : st->wait_time_min); + printf("\n"); + } +} + +static void dump_map(void) +{ + unsigned int i; + struct lock_stat *st; + + for (i = 0; i < LOCKHASH_SIZE; i++) { + list_for_each_entry(st, &lockhash_table[i], hash_entry) { + printf("%p: %s (src: %s, line: %u)\n", + st->addr, st->name, st->file, st->line); + } + } +} + +static struct perf_event_ops eops = { + .sample = process_sample_event, + .comm = event__process_comm, +}; + +static struct perf_session *session; + +static int read_events(void) +{ + session = perf_session__new(input_name, O_RDONLY, 0); + if (!session) + die("Initializing perf session failed\n"); + + return perf_session__process_events(session, &eops); +} + +static void sort_result(void) +{ + unsigned int i; + struct lock_stat *st; + + for (i = 0; i < LOCKHASH_SIZE; i++) { + list_for_each_entry(st, &lockhash_table[i], hash_entry) { + insert_to_result(st, compare); + } + } +} + +static void __cmd_prof(void) +{ + setup_pager(); + select_key(); + read_events(); + sort_result(); + print_result(); +} + +static const char * const prof_usage[] = { + "perf sched prof []", + NULL +}; + +static const struct option prof_options[] = { + OPT_STRING('k', "key", &sort_key, "acquired", + "key for sorting"), + /* TODO: type */ + OPT_END() +}; + +static const char * const lock_usage[] = { + "perf lock [] {record|trace|prof}", + NULL +}; + +static const struct option lock_options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() +}; + +static const char *record_args[] = { + "record", + "-a", + "-R", + "-M", + "-f", + "-m", "1024", + "-c", "1", + "-e", "lock:lock_acquire:r", + "-e", "lock:lock_acquired:r", + "-e", "lock:lock_contended:r", + "-e", "lock:lock_release:r", +}; + +static int __cmd_record(int argc, const char **argv) +{ + unsigned int rec_argc, i, j; + const char **rec_argv; + + rec_argc = ARRAY_SIZE(record_args) + argc - 1; + rec_argv = calloc(rec_argc + 1, sizeof(char *)); + + for (i = 0; i < ARRAY_SIZE(record_args); i++) + rec_argv[i] = strdup(record_args[i]); + + for (j = 1; j < (unsigned int)argc; j++, i++) + rec_argv[i] = argv[j]; + + BUG_ON(i != rec_argc); + + return cmd_record(i, rec_argv, NULL); +} + +int cmd_lock(int argc, const char **argv, const char *prefix __used) +{ + unsigned int i; + + symbol__init(); + for (i = 0; i < LOCKHASH_SIZE; i++) + INIT_LIST_HEAD(lockhash_table + i); + + argc = parse_options(argc, argv, lock_options, lock_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (!argc) + usage_with_options(lock_usage, lock_options); + + if (!strncmp(argv[0], "rec", 3)) { + return __cmd_record(argc, argv); + } else if (!strncmp(argv[0], "prof", 4)) { + trace_handler = &prof_lock_ops; + if (argc) { + argc = parse_options(argc, argv, + prof_options, prof_usage, 0); + if (argc) + usage_with_options(prof_usage, prof_options); + } + __cmd_prof(); + } else if (!strcmp(argv[0], "trace")) { + /* Aliased to 'perf trace' */ + return cmd_trace(argc, argv, prefix); + } else if (!strcmp(argv[0], "map")) { + /* recycling prof_lock_ops */ + trace_handler = &prof_lock_ops; + setup_pager(); + read_events(); + dump_map(); + } else { + usage_with_options(lock_usage, lock_options); + } + + return 0; +} diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index dee97cfe3794..10fe49e7048a 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -31,5 +31,6 @@ extern int 
cmd_trace(int argc, const char **argv, const char *prefix); extern int cmd_version(int argc, const char **argv, const char *prefix); extern int cmd_probe(int argc, const char **argv, const char *prefix); extern int cmd_kmem(int argc, const char **argv, const char *prefix); +extern int cmd_lock(int argc, const char **argv, const char *prefix); #endif diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 109b89b30ced..57cb107c1f13 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -303,6 +303,7 @@ static void handle_internal_command(int argc, const char **argv) { "sched", cmd_sched, 0 }, { "probe", cmd_probe, 0 }, { "kmem", cmd_kmem, 0 }, + { "lock", cmd_lock, 0 }, }; unsigned int i; static const char ext[] = STRIP_EXTENSION; -- cgit v1.2.3-58-ga151 From 59f411b62c9282891274e721fea29026b0eda3cc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 31 Jan 2010 08:27:58 +0100 Subject: perf lock: Clean up various details Fix up a few small stylistic details: - use consistent vertical spacing/alignment - remove line80 artifacts - group some global variables better - remove dead code Plus rename 'prof' to 'report' to make it more in line with other tools, and remove the line/file keying as we really want to use IPs like the other tools do. Signed-off-by: Ingo Molnar Cc: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1264851813-8413-12-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-lock.c | 210 ++++++++++++++++++---------------------------- 1 file changed, 82 insertions(+), 128 deletions(-) diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 2b5f88754c26..fb9ab2ad3f92 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -32,37 +32,37 @@ static struct list_head lockhash_table[LOCKHASH_SIZE]; #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) #define lockhashentry(key) (lockhash_table + __lockhashfn((key))) -#define LOCK_STATE_UNLOCKED 0 /* initial state */ -#define LOCK_STATE_LOCKED 1 +#define LOCK_STATE_UNLOCKED 0 /* initial state */ +#define LOCK_STATE_LOCKED 1 struct lock_stat { - struct list_head hash_entry; - struct rb_node rb; /* used for sorting */ + struct list_head hash_entry; + struct rb_node rb; /* used for sorting */ - /* FIXME: raw_field_value() returns unsigned long long, + /* + * FIXME: raw_field_value() returns unsigned long long, * so address of lockdep_map should be dealed as 64bit. - * Is there more better solution? */ - void *addr; /* address of lockdep_map, used as ID */ - char *name; /* for strcpy(), we cannot use const */ - char *file; - unsigned int line; + * Is there more better solution? + */ + void *addr; /* address of lockdep_map, used as ID */ + char *name; /* for strcpy(), we cannot use const */ - int state; - u64 prev_event_time; /* timestamp of previous event */ + int state; + u64 prev_event_time; /* timestamp of previous event */ - unsigned int nr_acquired; - unsigned int nr_acquire; - unsigned int nr_contended; - unsigned int nr_release; + unsigned int nr_acquired; + unsigned int nr_acquire; + unsigned int nr_contended; + unsigned int nr_release; /* these times are in nano sec. 
*/ - u64 wait_time_total; - u64 wait_time_min; - u64 wait_time_max; + u64 wait_time_total; + u64 wait_time_min; + u64 wait_time_max; }; /* build simple key function one is bigger than two */ -#define SINGLE_KEY(member) \ +#define SINGLE_KEY(member) \ static int lock_stat_key_ ## member(struct lock_stat *one, \ struct lock_stat *two) \ { \ @@ -81,12 +81,15 @@ struct lock_key { * this should be simpler than raw name of member * e.g. nr_acquired -> acquired, wait_time_total -> wait_total */ - const char *name; - int (*key)(struct lock_stat*, struct lock_stat*); + const char *name; + int (*key)(struct lock_stat*, struct lock_stat*); }; -static const char *sort_key = "acquired"; -static int (*compare)(struct lock_stat *, struct lock_stat *); +static const char *sort_key = "acquired"; + +static int (*compare)(struct lock_stat *, struct lock_stat *); + +static struct rb_root result; /* place to store sorted data */ #define DEF_KEY_LOCK(name, fn_suffix) \ { #name, lock_stat_key_ ## fn_suffix } @@ -116,11 +119,8 @@ static void select_key(void) die("Unknown compare key:%s\n", sort_key); } -static struct rb_root result; /* place to store sorted data */ - static void insert_to_result(struct lock_stat *st, - int (*bigger)(struct lock_stat *, - struct lock_stat *)) + int (*bigger)(struct lock_stat *, struct lock_stat *)) { struct rb_node **rb = &result.rb_node; struct rb_node *parent = NULL; @@ -155,8 +155,7 @@ static struct lock_stat *pop_from_result(void) return container_of(node, struct lock_stat, rb); } -static struct lock_stat *lock_stat_findnew(void *addr, const char *name, - const char *file, unsigned int line) +static struct lock_stat *lock_stat_findnew(void *addr, const char *name) { struct list_head *entry = lockhashentry(addr); struct lock_stat *ret, *new; @@ -175,11 +174,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name, if (!new->name) goto alloc_failed; strcpy(new->name, name); - new->file = zalloc(sizeof(char) * strlen(file) + 1); - if (!new->file) - goto alloc_failed; - strcpy(new->file, file); - new->line = line; /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */ new->state = LOCK_STATE_UNLOCKED; @@ -197,36 +191,28 @@ static char const *input_name = "perf.data"; static int profile_cpu = -1; struct raw_event_sample { - u32 size; - char data[0]; + u32 size; + char data[0]; }; struct trace_acquire_event { - void *addr; - const char *name; - const char *file; - unsigned int line; + void *addr; + const char *name; }; struct trace_acquired_event { - void *addr; - const char *name; - const char *file; - unsigned int line; + void *addr; + const char *name; }; struct trace_contended_event { - void *addr; - const char *name; - const char *file; - unsigned int line; + void *addr; + const char *name; }; struct trace_release_event { - void *addr; - const char *name; - const char *file; - unsigned int line; + void *addr; + const char *name; }; struct trace_lock_handler { @@ -255,7 +241,8 @@ struct trace_lock_handler { struct thread *thread); }; -static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event, +static void +report_lock_acquire_event(struct trace_acquire_event *acquire_event, struct event *__event __used, int cpu __used, u64 timestamp, @@ -263,8 +250,7 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event, { struct lock_stat *st; - st = lock_stat_findnew(acquire_event->addr, acquire_event->name, - acquire_event->file, acquire_event->line); + st = lock_stat_findnew(acquire_event->addr, acquire_event->name); switch 
(st->state) { case LOCK_STATE_UNLOCKED: @@ -279,7 +265,8 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event, st->prev_event_time = timestamp; } -static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event, +static void +report_lock_acquired_event(struct trace_acquired_event *acquired_event, struct event *__event __used, int cpu __used, u64 timestamp, @@ -287,8 +274,7 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event { struct lock_stat *st; - st = lock_stat_findnew(acquired_event->addr, acquired_event->name, - acquired_event->file, acquired_event->line); + st = lock_stat_findnew(acquired_event->addr, acquired_event->name); switch (st->state) { case LOCK_STATE_UNLOCKED: @@ -305,7 +291,8 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event st->prev_event_time = timestamp; } -static void prof_lock_contended_event(struct trace_contended_event *contended_event, +static void +report_lock_contended_event(struct trace_contended_event *contended_event, struct event *__event __used, int cpu __used, u64 timestamp, @@ -313,8 +300,7 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev { struct lock_stat *st; - st = lock_stat_findnew(contended_event->addr, contended_event->name, - contended_event->file, contended_event->line); + st = lock_stat_findnew(contended_event->addr, contended_event->name); switch (st->state) { case LOCK_STATE_UNLOCKED: @@ -330,7 +316,8 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev st->prev_event_time = timestamp; } -static void prof_lock_release_event(struct trace_release_event *release_event, +static void +report_lock_release_event(struct trace_release_event *release_event, struct event *__event __used, int cpu __used, u64 timestamp, @@ -339,8 +326,7 @@ static void prof_lock_release_event(struct trace_release_event *release_event, struct lock_stat *st; u64 hold_time; - st = lock_stat_findnew(release_event->addr, release_event->name, - release_event->file, release_event->line); + st = lock_stat_findnew(release_event->addr, release_event->name); switch (st->state) { case LOCK_STATE_UNLOCKED: @@ -373,11 +359,11 @@ end: /* lock oriented handlers */ /* TODO: handlers for CPU oriented, thread oriented */ -static struct trace_lock_handler prof_lock_ops = { - .acquire_event = prof_lock_acquire_event, - .acquired_event = prof_lock_acquired_event, - .contended_event = prof_lock_contended_event, - .release_event = prof_lock_release_event, +static struct trace_lock_handler report_lock_ops = { + .acquire_event = report_lock_acquire_event, + .acquired_event = report_lock_acquired_event, + .contended_event = report_lock_contended_event, + .release_event = report_lock_release_event, }; static struct trace_lock_handler *trace_handler; @@ -395,14 +381,9 @@ process_lock_acquire_event(void *data, tmp = raw_field_value(event, "lockdep_addr", data); memcpy(&acquire_event.addr, &tmp, sizeof(void *)); acquire_event.name = (char *)raw_field_ptr(event, "name", data); - acquire_event.file = (char *)raw_field_ptr(event, "file", data); - acquire_event.line = - (unsigned int)raw_field_value(event, "line", data); - if (trace_handler->acquire_event) { - trace_handler->acquire_event(&acquire_event, - event, cpu, timestamp, thread); - } + if (trace_handler->acquire_event) + trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread); } static void @@ -418,14 +399,9 @@ process_lock_acquired_event(void *data, tmp = 
raw_field_value(event, "lockdep_addr", data); memcpy(&acquired_event.addr, &tmp, sizeof(void *)); acquired_event.name = (char *)raw_field_ptr(event, "name", data); - acquired_event.file = (char *)raw_field_ptr(event, "file", data); - acquired_event.line = - (unsigned int)raw_field_value(event, "line", data); - if (trace_handler->acquire_event) { - trace_handler->acquired_event(&acquired_event, - event, cpu, timestamp, thread); - } + if (trace_handler->acquire_event) + trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread); } static void @@ -441,14 +417,9 @@ process_lock_contended_event(void *data, tmp = raw_field_value(event, "lockdep_addr", data); memcpy(&contended_event.addr, &tmp, sizeof(void *)); contended_event.name = (char *)raw_field_ptr(event, "name", data); - contended_event.file = (char *)raw_field_ptr(event, "file", data); - contended_event.line = - (unsigned int)raw_field_value(event, "line", data); - if (trace_handler->acquire_event) { - trace_handler->contended_event(&contended_event, - event, cpu, timestamp, thread); - } + if (trace_handler->acquire_event) + trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread); } static void @@ -464,14 +435,9 @@ process_lock_release_event(void *data, tmp = raw_field_value(event, "lockdep_addr", data); memcpy(&release_event.addr, &tmp, sizeof(void *)); release_event.name = (char *)raw_field_ptr(event, "name", data); - release_event.file = (char *)raw_field_ptr(event, "file", data); - release_event.line = - (unsigned int)raw_field_value(event, "line", data); - if (trace_handler->acquire_event) { - trace_handler->release_event(&release_event, - event, cpu, timestamp, thread); - } + if (trace_handler->acquire_event) + trace_handler->release_event(&release_event, event, cpu, timestamp, thread); } static void @@ -503,14 +469,6 @@ static int process_sample_event(event_t *event, struct perf_session *session) event__parse_sample(event, session->sample_type, &data); thread = perf_session__findnew(session, data.pid); - /* - * FIXME: this causes warn on 32bit environment - * because of (void *)data.ip (type of data.ip is u64) - */ -/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */ -/* event->header.misc, */ -/* data.pid, data.tid, (void *)data.ip, data.period); */ - if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", event->header.type); @@ -580,15 +538,14 @@ static void dump_map(void) for (i = 0; i < LOCKHASH_SIZE; i++) { list_for_each_entry(st, &lockhash_table[i], hash_entry) { - printf("%p: %s (src: %s, line: %u)\n", - st->addr, st->name, st->file, st->line); + printf("%p: %s\n", st->addr, st->name); } } } static struct perf_event_ops eops = { - .sample = process_sample_event, - .comm = event__process_comm, + .sample = process_sample_event, + .comm = event__process_comm, }; static struct perf_session *session; @@ -614,7 +571,7 @@ static void sort_result(void) } } -static void __cmd_prof(void) +static void __cmd_report(void) { setup_pager(); select_key(); @@ -623,12 +580,12 @@ static void __cmd_prof(void) print_result(); } -static const char * const prof_usage[] = { - "perf sched prof []", +static const char * const report_usage[] = { + "perf lock report []", NULL }; -static const struct option prof_options[] = { +static const struct option report_options[] = { OPT_STRING('k', "key", &sort_key, "acquired", "key for sorting"), /* TODO: type */ @@ -636,17 +593,14 @@ static const struct option prof_options[] = { }; static const char * const lock_usage[] = { - "perf lock [] 
{record|trace|prof}", + "perf lock [] {record|trace|report}", NULL }; static const struct option lock_options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_BOOLEAN('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), + OPT_STRING('i', "input", &input_name, "file", "input file name"), + OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_END() }; @@ -698,21 +652,21 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used) if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); - } else if (!strncmp(argv[0], "prof", 4)) { - trace_handler = &prof_lock_ops; + } else if (!strncmp(argv[0], "report", 6)) { + trace_handler = &report_lock_ops; if (argc) { argc = parse_options(argc, argv, - prof_options, prof_usage, 0); + report_options, report_usage, 0); if (argc) - usage_with_options(prof_usage, prof_options); + usage_with_options(report_usage, report_options); } - __cmd_prof(); + __cmd_report(); } else if (!strcmp(argv[0], "trace")) { /* Aliased to 'perf trace' */ return cmd_trace(argc, argv, prefix); } else if (!strcmp(argv[0], "map")) { - /* recycling prof_lock_ops */ - trace_handler = &prof_lock_ops; + /* recycling report_lock_ops */ + trace_handler = &report_lock_ops; setup_pager(); read_events(); dump_map(); -- cgit v1.2.3-58-ga151 From b8f46c5a34fa64fd456295388d18f50ae69d9f37 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 3 Feb 2010 11:53:14 +0800 Subject: perf tools: Use O_LARGEFILE to open perf data file Open perf data file with O_LARGEFILE flag since its size is easily larger that 2G. For example: # rm -rf perf.data # ./perf kmem record sleep 300 [ perf record: Woken up 0 times to write data ] [ perf record: Captured and wrote 3142.147 MB perf.data (~137282513 samples) ] # ll -h perf.data -rw------- 1 root root 3.1G ..... Signed-off-by: Xiao Guangrong Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4B68F32A.9040203@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 5 ++++- tools/perf/util/header.c | 22 +++++++++++++--------- tools/perf/util/session.c | 5 ++++- tools/perf/util/trace-event-read.c | 4 ++-- 4 files changed, 23 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index eea56910b91c..949167efa1ed 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -5,6 +5,9 @@ * (or a CPU, or a PID) into the perf.data output file - for * later analysis via perf report. 
*/ +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 + #include "builtin.h" #include "perf.h" @@ -451,7 +454,7 @@ static int __cmd_record(int argc, const char **argv) append_file = 0; } - flags = O_CREAT|O_RDWR; + flags = O_CREAT|O_RDWR|O_LARGEFILE; if (append_file) file_new = 0; else diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 2bb2bdb1f456..ed3efd728b41 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1,3 +1,6 @@ +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 + #include #include #include @@ -382,7 +385,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) sec_size = sizeof(*feat_sec) * nr_sections; sec_start = self->data_offset + self->data_size; - lseek(fd, sec_start + sec_size, SEEK_SET); + lseek64(fd, sec_start + sec_size, SEEK_SET); if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { struct perf_file_section *trace_sec; @@ -390,9 +393,9 @@ static int perf_header__adds_write(struct perf_header *self, int fd) trace_sec = &feat_sec[idx++]; /* Write trace info */ - trace_sec->offset = lseek(fd, 0, SEEK_CUR); + trace_sec->offset = lseek64(fd, 0, SEEK_CUR); read_tracing_data(fd, attrs, nr_counters); - trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; + trace_sec->size = lseek64(fd, 0, SEEK_CUR) - trace_sec->offset; } @@ -402,17 +405,18 @@ static int perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; /* Write build-ids */ - buildid_sec->offset = lseek(fd, 0, SEEK_CUR); + buildid_sec->offset = lseek64(fd, 0, SEEK_CUR); err = dsos__write_buildid_table(fd); if (err < 0) { pr_debug("failed to write buildid table\n"); goto out_free; } - buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; + buildid_sec->size = lseek64(fd, 0, SEEK_CUR) - + buildid_sec->offset; dsos__cache_build_ids(); } - lseek(fd, sec_start, SEEK_SET); + lseek64(fd, sec_start, SEEK_SET); err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); @@ -506,7 +510,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) pr_debug("failed to write perf header\n"); return err; } - lseek(fd, self->data_offset + self->data_size, SEEK_SET); + lseek64(fd, self->data_offset + self->data_size, SEEK_SET); self->frozen = 1; return 0; @@ -560,7 +564,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, sec_size = sizeof(*feat_sec) * nr_sections; - lseek(fd, self->data_offset + self->data_size, SEEK_SET); + lseek64(fd, self->data_offset + self->data_size, SEEK_SET); if (perf_header__getbuffer64(self, fd, feat_sec, sec_size)) goto out_free; @@ -634,7 +638,7 @@ static int perf_file_section__process(struct perf_file_section *self, struct perf_header *ph, int feat, int fd) { - if (lseek(fd, self->offset, SEEK_SET) < 0) { + if (lseek64(fd, self->offset, SEEK_SET) < 0) { pr_debug("Failed to lseek to %Ld offset for feature %d, " "continuing...\n", self->offset, feat); return 0; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8e7c1896eaa2..cf91d099f0aa 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1,3 +1,6 @@ +#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 + #include #include @@ -12,7 +15,7 @@ static int perf_session__open(struct perf_session *self, bool force) { struct stat input_stat; - self->fd = open(self->filename, O_RDONLY); + self->fd = open(self->filename, O_RDONLY|O_LARGEFILE); if (self->fd < 0) { pr_err("failed to open file: %s", self->filename); 
if (!strcmp(self->filename, "perf.data")) diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 1744422cafcb..ca3c26d466f3 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -83,7 +83,7 @@ static char *read_string(void) char *str = NULL; int size = 0; int i; - int r; + s64 r; for (;;) { r = read(input_fd, buf, BUFSIZ); @@ -117,7 +117,7 @@ static char *read_string(void) i++; /* move the file descriptor to the end of the string */ - r = lseek(input_fd, -(r - i), SEEK_CUR); + r = lseek64(input_fd, -(r - i), SEEK_CUR); if (r < 0) die("lseek"); -- cgit v1.2.3-58-ga151 From 9de89fe7c577847877ae00ea1aa6315559b10243 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:00 -0200 Subject: perf symbols: Remove perf_session usage in symbols layer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I noticed while writing the first test in 'perf regtest' that to just test the symbol handling routines one needs to create a perf session, that is a layer centered on a perf.data file, events, etc, so I untied these layers. This reduces the complexity for the users as the number of parameters to most of the symbols and session APIs now was reduced while not adding more state to all the map instances by only having data that is needed to split the kernel (kallsyms and ELF symtab sections) maps and do vmlinux relocation on the main kernel map. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 2 +- tools/perf/builtin-probe.c | 5 +- tools/perf/util/event.c | 6 +-- tools/perf/util/map.c | 20 +++++--- tools/perf/util/map.h | 22 +++++--- tools/perf/util/session.c | 35 +++++++++---- tools/perf/util/session.h | 22 +++++--- tools/perf/util/symbol.c | 122 +++++++++++++++++++++------------------------ tools/perf/util/symbol.h | 19 +++---- tools/perf/util/thread.c | 3 +- tools/perf/util/thread.h | 14 ++++-- 11 files changed, 149 insertions(+), 121 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 5d5dc6b09617..924a9518931a 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -369,7 +369,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session, if (is_caller) { addr = data->call_site; if (!raw_ip) - sym = map_groups__find_function(&session->kmaps, session, addr, NULL); + sym = map_groups__find_function(&session->kmaps, addr, NULL); } else addr = data->ptr; diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 34f2acb1cc88..4fa73eca1d82 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -122,8 +122,7 @@ static int opt_del_probe_event(const struct option *opt __used, static void evaluate_probe_point(struct probe_point *pp) { struct symbol *sym; - sym = map__find_symbol_by_name(session.kmap, pp->function, - session.psession, NULL); + sym = map__find_symbol_by_name(session.kmap, pp->function, NULL); if (!sym) die("Kernel symbol \'%s\' not found - probe not added.", pp->function); @@ -132,7 +131,7 @@ static void evaluate_probe_point(struct probe_point *pp) #ifndef NO_LIBDWARF static int open_vmlinux(void) { - if (map__load(session.kmap, session.psession, NULL) < 0) { + if (map__load(session.kmap, NULL) < 0) { pr_debug("Failed to load kernel map.\n"); return 
-EINVAL; } diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index bbaee61c1683..c3831f633dec 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -374,9 +374,7 @@ int event__process_mmap(event_t *self, struct perf_session *session) goto out_problem; kernel->kernel = 1; - if (__map_groups__create_kernel_maps(&session->kmaps, - session->vmlinux_maps, - kernel) < 0) + if (__perf_session__create_kernel_maps(session, kernel) < 0) goto out_problem; session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start; @@ -476,7 +474,7 @@ void thread__find_addr_location(struct thread *self, { thread__find_addr_map(self, session, cpumode, type, addr, al); if (al->map != NULL) - al->sym = map__find_symbol(al->map, session, al->addr, filter); + al->sym = map__find_symbol(al->map, al->addr, filter); else al->sym = NULL; } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index c4d55a0da2ea..36ff0bf0315d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -104,8 +104,7 @@ void map__fixup_end(struct map *self) #define DSO__DELETED "(deleted)" -int map__load(struct map *self, struct perf_session *session, - symbol_filter_t filter) +int map__load(struct map *self, symbol_filter_t filter) { const char *name = self->dso->long_name; int nr; @@ -113,7 +112,7 @@ int map__load(struct map *self, struct perf_session *session, if (dso__loaded(self->dso, self->type)) return 0; - nr = dso__load(self->dso, self, session, filter); + nr = dso__load(self->dso, self, filter); if (nr < 0) { if (self->dso->has_build_id) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; @@ -144,24 +143,29 @@ int map__load(struct map *self, struct perf_session *session, return -1; } + /* + * Only applies to the kernel, as its symtabs aren't relative like the + * module ones. 
+ */ + if (self->dso->kernel) + map__reloc_vmlinux(self); return 0; } -struct symbol *map__find_symbol(struct map *self, struct perf_session *session, - u64 addr, symbol_filter_t filter) +struct symbol *map__find_symbol(struct map *self, u64 addr, + symbol_filter_t filter) { - if (map__load(self, session, filter) < 0) + if (map__load(self, filter) < 0) return NULL; return dso__find_symbol(self->dso, self->type, addr); } struct symbol *map__find_symbol_by_name(struct map *self, const char *name, - struct perf_session *session, symbol_filter_t filter) { - if (map__load(self, session, filter) < 0) + if (map__load(self, filter) < 0) return NULL; if (!dso__sorted_by_name(self->dso, self->type)) diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 72f0b6ab5ea5..de048399d776 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -14,6 +14,8 @@ enum map_type { #define MAP__NR_TYPES (MAP__VARIABLE + 1) struct dso; +struct ref_reloc_sym; +struct map_groups; struct map { union { @@ -29,6 +31,16 @@ struct map { struct dso *dso; }; +struct kmap { + struct ref_reloc_sym *ref_reloc_sym; + struct map_groups *kmaps; +}; + +static inline struct kmap *map__kmap(struct map *self) +{ + return (struct kmap *)(self + 1); +} + static inline u64 map__map_ip(struct map *map, u64 ip) { return ip - map->start + map->pgoff; @@ -58,16 +70,14 @@ struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -struct perf_session; - -int map__load(struct map *self, struct perf_session *session, - symbol_filter_t filter); -struct symbol *map__find_symbol(struct map *self, struct perf_session *session, +int map__load(struct map *self, symbol_filter_t filter); +struct symbol *map__find_symbol(struct map *self, u64 addr, symbol_filter_t filter); struct symbol *map__find_symbol_by_name(struct map *self, const char *name, - struct perf_session *session, symbol_filter_t filter); void map__fixup_start(struct map *self); void map__fixup_end(struct map *self); +void map__reloc_vmlinux(struct map *self); + #endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index cf91d099f0aa..aa8a03120bbd 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -53,6 +53,11 @@ out_close: return -1; } +static inline int perf_session__create_kernel_maps(struct perf_session *self) +{ + return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps); +} + struct perf_session *perf_session__new(const char *filename, int mode, bool force) { size_t len = filename ? 
strlen(filename) + 1 : 0; @@ -507,6 +512,7 @@ int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, u64 addr) { char *bracket; + enum map_type i; self->ref_reloc_sym.name = strdup(symbol_name); if (self->ref_reloc_sym.name == NULL) @@ -517,6 +523,12 @@ int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, *bracket = '\0'; self->ref_reloc_sym.addr = addr; + + for (i = 0; i < MAP__NR_TYPES; ++i) { + struct kmap *kmap = map__kmap(self->vmlinux_maps[i]); + kmap->ref_reloc_sym = &self->ref_reloc_sym; + } + return 0; } @@ -530,20 +542,21 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip) return ip - (s64)map->pgoff; } -void perf_session__reloc_vmlinux_maps(struct perf_session *self, - u64 unrelocated_addr) +void map__reloc_vmlinux(struct map *self) { - enum map_type type; - s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr; + struct kmap *kmap = map__kmap(self); + s64 reloc; - if (!reloc) + if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) return; - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct map *map = self->vmlinux_maps[type]; + reloc = (kmap->ref_reloc_sym->unrelocated_addr - + kmap->ref_reloc_sym->addr); - map->map_ip = map__reloc_map_ip; - map->unmap_ip = map__reloc_unmap_ip; - map->pgoff = reloc; - } + if (!reloc) + return; + + self->map_ip = map__reloc_map_ip; + self->unmap_ip = map__reloc_unmap_ip; + self->pgoff = reloc; } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 36d1a80c0b6c..752d75aebade 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -3,13 +3,13 @@ #include "event.h" #include "header.h" +#include "symbol.h" #include "thread.h" #include #include "../../../include/linux/perf_event.h" struct ip_callchain; struct thread; -struct symbol; struct perf_session { struct perf_header header; @@ -24,10 +24,7 @@ struct perf_session { unsigned long unknown_events; struct rb_root hists; u64 sample_type; - struct { - const char *name; - u64 addr; - } ref_reloc_sym; + struct ref_reloc_sym ref_reloc_sym; int fd; int cwdlen; char *cwd; @@ -69,9 +66,20 @@ int perf_header__read_build_ids(struct perf_header *self, int input, int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self, const char *symbol_name, u64 addr); -void perf_session__reloc_vmlinux_maps(struct perf_session *self, - u64 unrelocated_addr); void mem_bswap_64(void *src, int byte_size); +static inline int __perf_session__create_kernel_maps(struct perf_session *self, + struct dso *kernel) +{ + return __map_groups__create_kernel_maps(&self->kmaps, + self->vmlinux_maps, kernel); +} + +static inline struct map * + perf_session__new_module_map(struct perf_session *self, + u64 start, const char *filename) +{ + return map_groups__new_module(&self->kmaps, start, filename); +} #endif /* __PERF_SESSION_H */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index f9049d12ead6..613874260761 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1,6 +1,5 @@ #include "util.h" #include "../perf.h" -#include "session.h" #include "sort.h" #include "string.h" #include "symbol.h" @@ -34,7 +33,7 @@ enum dso_origin { static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); static int dso__load_kernel_sym(struct dso *self, struct map *map, - struct perf_session *session, symbol_filter_t filter); + symbol_filter_t filter); static int vmlinux_path__nr_entries; static char **vmlinux_path; @@ -480,8 
+479,9 @@ static int dso__load_all_kallsyms(struct dso *self, const char *filename, * the original ELF section names vmlinux have. */ static int dso__split_kallsyms(struct dso *self, struct map *map, - struct perf_session *session, symbol_filter_t filter) + symbol_filter_t filter) { + struct map_groups *kmaps = map__kmap(map)->kmaps; struct map *curr_map = map; struct symbol *pos; int count = 0; @@ -503,7 +503,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, *module++ = '\0'; if (strcmp(curr_map->dso->short_name, module)) { - curr_map = map_groups__find_by_name(&session->kmaps, map->type, module); + curr_map = map_groups__find_by_name(kmaps, map->type, module); if (curr_map == NULL) { pr_debug("/proc/{kallsyms,modules} " "inconsistency while looking " @@ -538,7 +538,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, } curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; - map_groups__insert(&session->kmaps, curr_map); + map_groups__insert(kmaps, curr_map); ++kernel_range; } @@ -557,9 +557,8 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count; } - -static int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, - struct perf_session *session, symbol_filter_t filter) +int dso__load_kallsyms(struct dso *self, const char *filename, + struct map *map, symbol_filter_t filter) { if (dso__load_all_kallsyms(self, filename, map) < 0) return -1; @@ -567,7 +566,7 @@ static int dso__load_kallsyms(struct dso *self, const char *filename, struct map symbols__fixup_end(&self->symbols[map->type]); self->origin = DSO__ORIG_KERNEL; - return dso__split_kallsyms(self, map, session, filter); + return dso__split_kallsyms(self, map, filter); } static int dso__load_perf_map(struct dso *self, struct map *map, @@ -893,10 +892,10 @@ static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type } } -static int dso__load_sym(struct dso *self, struct map *map, - struct perf_session *session, const char *name, int fd, - symbol_filter_t filter, int kernel, int kmodule) +static int dso__load_sym(struct dso *self, struct map *map, const char *name, + int fd, symbol_filter_t filter, int kmodule) { + struct kmap *kmap = self->kernel ? 
map__kmap(map) : NULL; struct map *curr_map = map; struct dso *curr_dso = self; size_t dso_name_len = strlen(self->short_name); @@ -953,7 +952,7 @@ static int dso__load_sym(struct dso *self, struct map *map, nr_syms = shdr.sh_size / shdr.sh_entsize; memset(&sym, 0, sizeof(sym)); - if (!kernel) { + if (!self->kernel) { self->adjust_symbols = (ehdr.e_type == ET_EXEC || elf_section_by_name(elf, &ehdr, &shdr, ".gnu.prelink_undo", @@ -967,9 +966,9 @@ static int dso__load_sym(struct dso *self, struct map *map, int is_label = elf_sym__is_label(&sym); const char *section_name; - if (kernel && session->ref_reloc_sym.name != NULL && - strcmp(elf_name, session->ref_reloc_sym.name) == 0) - perf_session__reloc_vmlinux_maps(session, sym.st_value); + if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && + strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) + kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; if (!is_label && !elf_sym__is_a(&sym, map->type)) continue; @@ -985,7 +984,7 @@ static int dso__load_sym(struct dso *self, struct map *map, section_name = elf_sec__name(&shdr, secstrs); - if (kernel || kmodule) { + if (self->kernel || kmodule) { char dso_name[PATH_MAX]; if (strcmp(section_name, @@ -1001,7 +1000,7 @@ static int dso__load_sym(struct dso *self, struct map *map, snprintf(dso_name, sizeof(dso_name), "%s%s", self->short_name, section_name); - curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name); + curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); if (curr_map == NULL) { u64 start = sym.st_value; @@ -1020,7 +1019,7 @@ static int dso__load_sym(struct dso *self, struct map *map, curr_map->map_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip; curr_dso->origin = DSO__ORIG_KERNEL; - map_groups__insert(&session->kmaps, curr_map); + map_groups__insert(kmap->kmaps, curr_map); dsos__add(&dsos__kernel, curr_dso); } else curr_dso = curr_map->dso; @@ -1236,8 +1235,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, struct map *map, struct perf_session *session, - symbol_filter_t filter) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name; @@ -1249,7 +1247,7 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session, dso__set_loaded(self, map->type); if (self->kernel) - return dso__load_kernel_sym(self, map, session, filter); + return dso__load_kernel_sym(self, map, filter); name = malloc(size); if (!name) @@ -1320,7 +1318,7 @@ open_file: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0); + ret = dso__load_sym(self, map, name, fd, filter, 0); close(fd); /* @@ -1376,7 +1374,7 @@ static int dso__kernel_module_get_build_id(struct dso *self) return 0; } -static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname) +static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirname) { struct dirent *dent; DIR *dir = opendir(dirname); @@ -1396,7 +1394,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name); - if (perf_session__set_modules_path_dir(self, path) < 0) + if (map_groups__set_modules_path_dir(self, path) < 0) goto failure; } else { char *dot = strrchr(dent->d_name, '.'), @@ -1410,7 +1408,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d (int)(dot - dent->d_name), 
dent->d_name); strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name); + map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); if (map == NULL) continue; @@ -1431,7 +1429,7 @@ failure: return -1; } -static int perf_session__set_modules_path(struct perf_session *self) +static int map_groups__set_modules_path(struct map_groups *self) { struct utsname uts; char modules_path[PATH_MAX]; @@ -1442,7 +1440,7 @@ static int perf_session__set_modules_path(struct perf_session *self) snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", uts.release); - return perf_session__set_modules_path_dir(self, modules_path); + return map_groups__set_modules_path_dir(self, modules_path); } /* @@ -1452,8 +1450,8 @@ static int perf_session__set_modules_path(struct perf_session *self) */ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) { - struct map *self = malloc(sizeof(*self)); - + struct map *self = zalloc(sizeof(*self) + + (dso->kernel ? sizeof(struct kmap) : 0)); if (self != NULL) { /* * ->end will be filled after we load all the symbols @@ -1464,8 +1462,8 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return self; } -struct map *perf_session__new_module_map(struct perf_session *self, u64 start, - const char *filename) +struct map *map_groups__new_module(struct map_groups *self, u64 start, + const char *filename) { struct map *map; struct dso *dso = __dsos__findnew(&dsos__kernel, filename); @@ -1478,11 +1476,11 @@ struct map *perf_session__new_module_map(struct perf_session *self, u64 start, return NULL; dso->origin = DSO__ORIG_KMODULE; - map_groups__insert(&self->kmaps, map); + map_groups__insert(self, map); return map; } -static int perf_session__create_module_maps(struct perf_session *self) +static int map_groups__create_modules(struct map_groups *self) { char *line = NULL; size_t n; @@ -1520,7 +1518,7 @@ static int perf_session__create_module_maps(struct perf_session *self) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - map = perf_session__new_module_map(self, start, name); + map = map_groups__new_module(self, start, name); if (map == NULL) goto out_delete_line; dso__kernel_module_get_build_id(map->dso); @@ -1529,7 +1527,7 @@ static int perf_session__create_module_maps(struct perf_session *self) free(line); fclose(file); - return perf_session__set_modules_path(self); + return map_groups__set_modules_path(self); out_delete_line: free(line); @@ -1538,7 +1536,6 @@ out_failure: } static int dso__load_vmlinux(struct dso *self, struct map *map, - struct perf_session *session, const char *vmlinux, symbol_filter_t filter) { int err = -1, fd; @@ -1572,14 +1569,14 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return -1; dso__set_loaded(self, map->type); - err = dso__load_sym(self, map, session, vmlinux, fd, filter, 1, 0); + err = dso__load_sym(self, map, vmlinux, fd, filter, 0); close(fd); return err; } int dso__load_vmlinux_path(struct dso *self, struct map *map, - struct perf_session *session, symbol_filter_t filter) + symbol_filter_t filter) { int i, err = 0; @@ -1587,8 +1584,7 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map, vmlinux_path__nr_entries); for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(self, map, session, vmlinux_path[i], - filter); + err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); if (err > 0) { pr_debug("Using %s for symbols\n", vmlinux_path[i]); dso__set_long_name(self, 
strdup(vmlinux_path[i])); @@ -1600,7 +1596,7 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map, } static int dso__load_kernel_sym(struct dso *self, struct map *map, - struct perf_session *session, symbol_filter_t filter) + symbol_filter_t filter) { int err; const char *kallsyms_filename = NULL; @@ -1621,13 +1617,13 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, * match. */ if (symbol_conf.vmlinux_name != NULL) { - err = dso__load_vmlinux(self, map, session, + err = dso__load_vmlinux(self, map, symbol_conf.vmlinux_name, filter); goto out_try_fixup; } if (vmlinux_path != NULL) { - err = dso__load_vmlinux_path(self, map, session, filter); + err = dso__load_vmlinux_path(self, map, filter); if (err > 0) goto out_fixup; } @@ -1675,7 +1671,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } do_kallsyms: - err = dso__load_kallsyms(self, kallsyms_filename, map, session, filter); + err = dso__load_kallsyms(self, kallsyms_filename, map, filter); free(kallsyms_allocated_filename); out_try_fixup: @@ -1812,30 +1808,23 @@ int __map_groups__create_kernel_maps(struct map_groups *self, enum map_type type; for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + vmlinux_maps[type] = map__new2(0, kernel, type); if (vmlinux_maps[type] == NULL) return -1; vmlinux_maps[type]->map_ip = vmlinux_maps[type]->unmap_ip = identity__map_ip; + + kmap = map__kmap(vmlinux_maps[type]); + kmap->kmaps = self; map_groups__insert(self, vmlinux_maps[type]); } return 0; } -static int map_groups__create_kernel_maps(struct map_groups *self, - struct map *vmlinux_maps[MAP__NR_TYPES], - const char *vmlinux) -{ - struct dso *kernel = dsos__create_kernel(vmlinux); - - if (kernel == NULL) - return -1; - - return __map_groups__create_kernel_maps(self, vmlinux_maps, kernel); -} - static void vmlinux_path__exit(void) { while (--vmlinux_path__nr_entries >= 0) { @@ -1941,19 +1930,22 @@ out_free_comm_list: return -1; } -int perf_session__create_kernel_maps(struct perf_session *self) +int map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES]) { - if (map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps, - symbol_conf.vmlinux_name) < 0) + struct dso *kernel = dsos__create_kernel(symbol_conf.vmlinux_name); + + if (kernel == NULL) + return -1; + + if (__map_groups__create_kernel_maps(self, vmlinux_maps, kernel) < 0) return -1; - if (symbol_conf.use_modules && - perf_session__create_module_maps(self) < 0) - pr_debug("Failed to load list of modules for session %s, " - "continuing...\n", self->filename); + if (symbol_conf.use_modules && map_groups__create_modules(self) < 0) + return -1; /* * Now that we have all the maps created, just set the ->end of them: */ - map_groups__fixup_end(&self->kmaps); + map_groups__fixup_end(self); return 0; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 124302778c09..e6a59e5c2bea 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -80,6 +80,12 @@ static inline void *symbol__priv(struct symbol *self) return ((void *)self) - symbol_conf.priv_size; } +struct ref_reloc_sym { + const char *name; + u64 addr; + u64 unrelocated_addr; +}; + struct addr_location { struct thread *thread; struct map *map; @@ -126,12 +132,11 @@ static inline struct dso *dsos__findnew(const char *name) return __dsos__findnew(&dsos__user, name); } -struct perf_session; - -int dso__load(struct dso *self, struct map *map, struct perf_session *session, - symbol_filter_t filter); 
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); int dso__load_vmlinux_path(struct dso *self, struct map *map, - struct perf_session *session, symbol_filter_t filter); + symbol_filter_t filter); +int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, + symbol_filter_t filter); void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp, bool with_hits); @@ -156,9 +161,5 @@ int kallsyms__parse(const char *filename, void *arg, int symbol__init(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -int perf_session__create_kernel_maps(struct perf_session *self); - -struct map *perf_session__new_module_map(struct perf_session *self, u64 start, - const char *filename); extern struct dso *vdso; #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 4a08dcf50b68..634b7f7140d5 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -282,14 +282,13 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp) } struct symbol *map_groups__find_symbol(struct map_groups *self, - struct perf_session *session, enum map_type type, u64 addr, symbol_filter_t filter) { struct map *map = map_groups__find(self, type, addr); if (map != NULL) - return map__find_symbol(map, session, map->map_ip(map, addr), filter); + return map__find_symbol(map, map->map_ip(map, addr), filter); return NULL; } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index e35653c1817c..56f317b8a06c 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -59,15 +59,14 @@ void thread__find_addr_location(struct thread *self, struct addr_location *al, symbol_filter_t filter); struct symbol *map_groups__find_symbol(struct map_groups *self, - struct perf_session *session, enum map_type type, u64 addr, symbol_filter_t filter); -static inline struct symbol * -map_groups__find_function(struct map_groups *self, struct perf_session *session, - u64 addr, symbol_filter_t filter) +static inline struct symbol *map_groups__find_function(struct map_groups *self, + u64 addr, + symbol_filter_t filter) { - return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter); + return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter); } struct map *map_groups__find_by_name(struct map_groups *self, @@ -76,4 +75,9 @@ struct map *map_groups__find_by_name(struct map_groups *self, int __map_groups__create_kernel_maps(struct map_groups *self, struct map *vmlinux_maps[MAP__NR_TYPES], struct dso *kernel); +int map_groups__create_kernel_maps(struct map_groups *self, + struct map *vmlinux_maps[MAP__NR_TYPES]); + +struct map *map_groups__new_module(struct map_groups *self, u64 start, + const char *filename); #endif /* __PERF_THREAD_H */ -- cgit v1.2.3-58-ga151 From 6275ce2d5f44ae4f8575c24724525cbb2a3a141b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:01 -0200 Subject: perf symbols: Fixup vsyscall maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While debugging a problem reported by Pekka Enberg by printing the IP and all the maps for a thread when we don't find a map for an IP I noticed that dso__load_sym needs to fixup these extra maps it creates to hold symbols in different ELF sections than the main kernel one. Now we're back showing things like: [root@doppio linux-2.6-tip]# perf report | grep vsyscall 0.02% mutt [kernel.kallsyms].vsyscall_fn [.] 
vread_hpet 0.01% named [kernel.kallsyms].vsyscall_fn [.] vread_hpet 0.01% NetworkManager [kernel.kallsyms].vsyscall_fn [.] vread_hpet 0.01% gconfd-2 [kernel.kallsyms].vsyscall_0 [.] vgettimeofday 0.01% hald-addon-rfki [kernel.kallsyms].vsyscall_fn [.] vread_hpet 0.00% dbus-daemon [kernel.kallsyms].vsyscall_fn [.] vread_hpet [root@doppio linux-2.6-tip]# Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 613874260761..051d71b33df0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1011,7 +1011,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (curr_dso == NULL) goto out_elf_end; curr_map = map__new2(start, curr_dso, - MAP__FUNCTION); + map->type); if (curr_map == NULL) { dso__delete(curr_dso); goto out_elf_end; @@ -1021,6 +1021,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, curr_dso->origin = DSO__ORIG_KERNEL; map_groups__insert(kmap->kmaps, curr_map); dsos__add(&dsos__kernel, curr_dso); + dso__set_loaded(curr_dso, map->type); } else curr_dso = curr_map->dso; @@ -1058,8 +1059,16 @@ new_symbol: /* * For misannotated, zeroed, ASM function sizes. */ - if (nr > 0) + if (nr > 0) { symbols__fixup_end(&self->symbols[map->type]); + if (kmap) { + /* + * We need to fixup this here too because we create new + * maps here, for things like vsyscall sections. + */ + __map_groups__fixup_end(kmap->kmaps, map->type); + } + } err = nr; out_elf_end: elf_end(elf); -- cgit v1.2.3-58-ga151 From 8d92c02ab07602786eaa6d4e5b519395730b3fd3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:02 -0200 Subject: perf symbols: Ditch vdso global variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can check using strcmp, most DSOs don't start with '[' so the test is cheap enough and we had to test it there anyway since when reading perf.data files we weren't calling the routine that created this global variable and thus weren't setting it as "loaded", which was causing a bogus: Failed to open [vdso], continuing without symbols Message as the first line of 'perf report'. 
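A minimal standalone sketch of the check this switches to; the function name and
paths are illustrative, only the "[vdso]" string test mirrors the actual change:

#include <stdio.h>
#include <string.h>

/* Anonymous maps and the [vdso] pseudo-DSO keep an identity ip->rip
 * mapping and have no on-disk file to load symbols from. */
static int skip_symbol_loading(const char *filename, int anon)
{
	return anon || strcmp(filename, "[vdso]") == 0;
}

int main(void)
{
	printf("%d\n", skip_symbol_loading("[vdso]", 0));		/* 1 */
	printf("%d\n", skip_symbol_loading("/lib/libc-2.10.1.so", 0));	/* 0 */
	printf("%d\n", skip_symbol_loading("//anon", 1));		/* 1 */
	return 0;
}
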
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/map.c | 7 ++++++- tools/perf/util/symbol.c | 26 ++++---------------------- tools/perf/util/symbol.h | 6 +++++- 3 files changed, 15 insertions(+), 24 deletions(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 36ff0bf0315d..f6626cc3df2e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -68,8 +68,13 @@ struct map *map__new(struct mmap_event *event, enum map_type type, map__init(self, type, event->start, event->start + event->len, event->pgoff, dso); - if (self->dso == vdso || anon) + if (anon) { +set_identity: self->map_ip = self->unmap_ip = identity__map_ip; + } else if (strcmp(filename, "[vdso]") == 0) { + dso__set_loaded(dso, self->type); + goto set_identity; + } } return self; out_delete: diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 051d71b33df0..e752837363ee 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -53,11 +53,6 @@ bool dso__sorted_by_name(const struct dso *self, enum map_type type) return self->sorted_by_name & (1 << type); } -static void dso__set_loaded(struct dso *self, enum map_type type) -{ - self->loaded |= (1 << type); -} - static void dso__set_sorted_by_name(struct dso *self, enum map_type type) { self->sorted_by_name |= (1 << type); @@ -1697,7 +1692,6 @@ out_fixup: LIST_HEAD(dsos__user); LIST_HEAD(dsos__kernel); -struct dso *vdso; static void dsos__add(struct list_head *head, struct dso *dso) { @@ -1790,24 +1784,12 @@ static struct dso *dsos__create_kernel(const char *vmlinux) { struct dso *kernel = dso__new_kernel(vmlinux); - if (kernel == NULL) - return NULL; - - vdso = dso__new("[vdso]"); - if (vdso == NULL) - goto out_delete_kernel_dso; - dso__set_loaded(vdso, MAP__FUNCTION); - - dso__read_running_kernel_build_id(kernel); - - dsos__add(&dsos__kernel, kernel); - dsos__add(&dsos__user, vdso); + if (kernel != NULL) { + dso__read_running_kernel_build_id(kernel); + dsos__add(&dsos__kernel, kernel); + } return kernel; - -out_delete_kernel_dso: - dso__delete(kernel); - return NULL; } int __map_groups__create_kernel_maps(struct map_groups *self, diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index e6a59e5c2bea..e90568a9e467 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -121,6 +121,11 @@ void dso__delete(struct dso *self); bool dso__loaded(const struct dso *self, enum map_type type); bool dso__sorted_by_name(const struct dso *self, enum map_type type); +static inline void dso__set_loaded(struct dso *self, enum map_type type) +{ + self->loaded |= (1 << type); +} + void dso__sort_by_name(struct dso *self, enum map_type type); extern struct list_head dsos__user, dsos__kernel; @@ -161,5 +166,4 @@ int kallsyms__parse(const char *filename, void *arg, int symbol__init(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -extern struct dso *vdso; #endif /* __PERF_SYMBOL */ -- cgit v1.2.3-58-ga151 From 8ad94c6052649a8e32120b464eefa0ffd8f2f04f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:03 -0200 Subject: perf probe: Don't use a perf_session instance just to resolve symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the recent modifications done to untie the session and symbol layers, 'perf probe' now can use just the 
symbols layer. Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Masami Hiramatsu Cc: Frédéric Weisbecker Cc: Masami Hiramatsu Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 4fa73eca1d82..ad47bd4c50ef 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -41,7 +41,6 @@ #include "util/debugfs.h" #include "util/symbol.h" #include "util/thread.h" -#include "util/session.h" #include "util/parse-options.h" #include "util/parse-events.h" /* For debugfs_path */ #include "util/probe-finder.h" @@ -59,8 +58,8 @@ static struct { int nr_probe; struct probe_point probes[MAX_PROBES]; struct strlist *dellist; - struct perf_session *psession; - struct map *kmap; + struct map_groups kmap_groups; + struct map *kmaps[MAP__NR_TYPES]; struct line_range line_range; } session; @@ -122,7 +121,8 @@ static int opt_del_probe_event(const struct option *opt __used, static void evaluate_probe_point(struct probe_point *pp) { struct symbol *sym; - sym = map__find_symbol_by_name(session.kmap, pp->function, NULL); + sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION], + pp->function, NULL); if (!sym) die("Kernel symbol \'%s\' not found - probe not added.", pp->function); @@ -131,12 +131,13 @@ static void evaluate_probe_point(struct probe_point *pp) #ifndef NO_LIBDWARF static int open_vmlinux(void) { - if (map__load(session.kmap, NULL) < 0) { + if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) { pr_debug("Failed to load kernel map.\n"); return -EINVAL; } - pr_debug("Try to open %s\n", session.kmap->dso->long_name); - return open(session.kmap->dso->long_name, O_RDONLY); + pr_debug("Try to open %s\n", + session.kmaps[MAP__FUNCTION]->dso->long_name); + return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY); } static int opt_show_lines(const struct option *opt __used, @@ -212,12 +213,11 @@ static void init_vmlinux(void) pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name); if (symbol__init() < 0) die("Failed to init symbol map."); - session.psession = perf_session__new(NULL, O_WRONLY, false); - if (session.psession == NULL) - die("Failed to init perf_session."); - session.kmap = session.psession->vmlinux_maps[MAP__FUNCTION]; - if (!session.kmap) - die("Could not find kernel map.\n"); + + map_groups__init(&session.kmap_groups); + if (map_groups__create_kernel_maps(&session.kmap_groups, + session.kmaps) < 0) + die("Failed to create kernel maps."); } int cmd_probe(int argc, const char **argv, const char *prefix __used) -- cgit v1.2.3-58-ga151 From 7b2567c1f57c059de29d3f2ca03aca84473865c8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:04 -0200 Subject: perf build-id: Move the routine to find DSOs with hits to the lib MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because 'perf record' will have to find the build-ids in after we stop recording, so as to reduce even more the impact in the workload while we do the measurement. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 ++ tools/perf/builtin-buildid-list.c | 31 ++----------------------------- tools/perf/util/build-id.c | 39 +++++++++++++++++++++++++++++++++++++++ tools/perf/util/build-id.h | 8 ++++++++ 4 files changed, 51 insertions(+), 29 deletions(-) create mode 100644 tools/perf/util/build-id.c create mode 100644 tools/perf/util/build-id.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 42969303e20b..3a5fb36ccc97 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -357,6 +357,7 @@ LIB_H += util/include/asm/uaccess.h LIB_H += perf.h LIB_H += util/cache.h LIB_H += util/callchain.h +LIB_H += util/build-id.h LIB_H += util/debug.h LIB_H += util/debugfs.h LIB_H += util/event.h @@ -390,6 +391,7 @@ LIB_H += util/probe-event.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o +LIB_OBJS += util/build-id.o LIB_OBJS += util/config.o LIB_OBJS += util/ctype.o LIB_OBJS += util/debugfs.o diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 431f204bde64..d0675c02f81e 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -8,6 +8,7 @@ */ #include "builtin.h" #include "perf.h" +#include "util/build-id.h" #include "util/cache.h" #include "util/debug.h" #include "util/parse-options.h" @@ -33,34 +34,6 @@ static const struct option options[] = { OPT_END() }; -static int build_id_list__process_event(event_t *event, - struct perf_session *session) -{ - struct addr_location al; - u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = perf_session__findnew(session, event->ip.pid); - - if (thread == NULL) { - pr_err("problem processing %d event, skipping it.\n", - event->header.type); - return -1; - } - - thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, - event->ip.ip, &al); - - if (al.map != NULL) - al.map->dso->hit = 1; - - return 0; -} - -static struct perf_event_ops build_id_list__event_ops = { - .sample = build_id_list__process_event, - .mmap = event__process_mmap, - .fork = event__process_task, -}; - static int __cmd_buildid_list(void) { int err = -1; @@ -71,7 +44,7 @@ static int __cmd_buildid_list(void) return -1; if (with_hits) - perf_session__process_events(session, &build_id_list__event_ops); + perf_session__process_events(session, &build_id__mark_dso_hit_ops); dsos__fprintf_buildid(stdout, with_hits); diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c new file mode 100644 index 000000000000..04904b35ba81 --- /dev/null +++ b/tools/perf/util/build-id.c @@ -0,0 +1,39 @@ +/* + * build-id.c + * + * build-id support + * + * Copyright (C) 2009, 2010 Red Hat Inc. 
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo + */ +#include "build-id.h" +#include "event.h" +#include "symbol.h" +#include + +static int build_id__mark_dso_hit(event_t *event, struct perf_session *session) +{ + struct addr_location al; + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread = perf_session__findnew(session, event->ip.pid); + + if (thread == NULL) { + pr_err("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, + event->ip.ip, &al); + + if (al.map != NULL) + al.map->dso->hit = 1; + + return 0; +} + +struct perf_event_ops build_id__mark_dso_hit_ops = { + .sample = build_id__mark_dso_hit, + .mmap = event__process_mmap, + .fork = event__process_task, +}; diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h new file mode 100644 index 000000000000..1d981d63cf9a --- /dev/null +++ b/tools/perf/util/build-id.h @@ -0,0 +1,8 @@ +#ifndef PERF_BUILD_ID_H_ +#define PERF_BUILD_ID_H_ 1 + +#include "session.h" + +extern struct perf_event_ops build_id__mark_dso_hit_ops; + +#endif -- cgit v1.2.3-58-ga151 From 6122e4e4f5d0913e319ef8a4dc60a47afe4abc0a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:05 -0200 Subject: perf record: Stop intercepting events, use postprocessing to get build-ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to stream events as fast as possible to perf.data, and also in the future we want to have splice working, when no interception will be possible. Using build_id__mark_dso_hit_ops to create the list of DSOs that back MMAPs we also optimize disk usage in the build-id cache by only caching DSOs that had hits. Suggested-by: Peter Zijlstra Signed-off-by: Arnaldo Carvalho de Melo Cc: Xiao Guangrong Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1265223128-11786-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 37 +++++++++++++------------- tools/perf/util/header.c | 7 +++-- tools/perf/util/session.c | 64 +++++++++++++++++++++++++++------------------ tools/perf/util/session.h | 3 +++ tools/perf/util/symbol.c | 13 +++++---- tools/perf/util/symbol.h | 2 +- 6 files changed, 73 insertions(+), 53 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 949167efa1ed..706f00196b87 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -12,6 +12,7 @@ #include "perf.h" +#include "util/build-id.h" #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -65,6 +66,7 @@ static int nr_poll = 0; static int nr_cpu = 0; static int file_new = 1; +static off_t post_processing_offset; static struct perf_session *session; @@ -114,26 +116,10 @@ static void write_output(void *buf, size_t size) } } -static void write_event(event_t *buf, size_t size) -{ - /* - * Add it to the list of DSOs, so that when we finish this - * record session we can pick the available build-ids. 
- */ - if (buf->header.type == PERF_RECORD_MMAP) { - struct list_head *head = &dsos__user; - if (buf->mmap.header.misc == 1) - head = &dsos__kernel; - __dsos__findnew(head, buf->mmap.filename); - } - - write_output(buf, size); -} - static int process_synthesized_event(event_t *event, struct perf_session *self __used) { - write_event(event, event->header.size); + write_output(event, event->header.size); return 0; } @@ -185,14 +171,14 @@ static void mmap_read(struct mmap_data *md) size = md->mask + 1 - (old & md->mask); old += size; - write_event(buf, size); + write_output(buf, size); } buf = &data[old & md->mask]; size = head - old; old += size; - write_event(buf, size); + write_output(buf, size); md->prev = old; mmap_write_tail(md, old); @@ -402,10 +388,21 @@ static void open_counters(int cpu, pid_t pid) nr_cpu++; } +static int process_buildids(void) +{ + u64 size = lseek(output, 0, SEEK_CUR); + + session->fd = output; + return __perf_session__process_events(session, post_processing_offset, + size - post_processing_offset, + size, &build_id__mark_dso_hit_ops); +} + static void atexit_header(void) { session->header.data_size += bytes_written; + process_buildids(); perf_header__write(&session->header, output, true); } @@ -558,6 +555,8 @@ static int __cmd_record(int argc, const char **argv) return err; } + post_processing_offset = lseek(output, 0, SEEK_CUR); + err = event__synthesize_kernel_mmap(process_synthesized_event, session, "_text"); if (err < 0) { diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ed3efd728b41..d5facd5ab1f7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -205,8 +205,11 @@ static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd) dsos__for_each_with_build_id(pos, head) { int err; struct build_id_event b; - size_t len = pos->long_name_len + 1; + size_t len; + if (!pos->hit) + continue; + len = pos->long_name_len + 1; len = ALIGN(len, NAME_ALIGN); memset(&b, 0, sizeof(b)); memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); @@ -371,7 +374,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) u64 sec_start; int idx = 0, err; - if (dsos__read_build_ids()) + if (dsos__read_build_ids(true)) perf_header__set_feat(self, HEADER_BUILD_ID); nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index aa8a03120bbd..74cbc64a3a3c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -385,8 +385,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se return thread; } -int perf_session__process_events(struct perf_session *self, - struct perf_event_ops *ops) +int __perf_session__process_events(struct perf_session *self, + u64 data_offset, u64 data_size, + u64 file_size, struct perf_event_ops *ops) { int err, mmap_prot, mmap_flags; u64 head, shift; @@ -396,32 +397,11 @@ int perf_session__process_events(struct perf_session *self, uint32_t size; char *buf; - if (perf_session__register_idle_thread(self) == NULL) - return -ENOMEM; - perf_event_ops__fill_defaults(ops); page_size = sysconf(_SC_PAGESIZE); - head = self->header.data_offset; - - if (!symbol_conf.full_paths) { - char bf[PATH_MAX]; - - if (getcwd(bf, sizeof(bf)) == NULL) { - err = -errno; -out_getcwd_err: - pr_err("failed to get the current directory\n"); - goto out_err; - } - self->cwd = strdup(bf); - if (self->cwd == NULL) { - err = -ENOMEM; - goto out_getcwd_err; - } - self->cwdlen = strlen(self->cwd); - } - 
+ head = data_offset; shift = page_size * (head / page_size); offset += shift; head -= shift; @@ -486,10 +466,10 @@ more: head += size; - if (offset + head >= self->header.data_offset + self->header.data_size) + if (offset + head >= data_offset + data_size) goto done; - if (offset + head < self->size) + if (offset + head < file_size) goto more; done: err = 0; @@ -497,6 +477,38 @@ out_err: return err; } +int perf_session__process_events(struct perf_session *self, + struct perf_event_ops *ops) +{ + int err; + + if (perf_session__register_idle_thread(self) == NULL) + return -ENOMEM; + + if (!symbol_conf.full_paths) { + char bf[PATH_MAX]; + + if (getcwd(bf, sizeof(bf)) == NULL) { + err = -errno; +out_getcwd_err: + pr_err("failed to get the current directory\n"); + goto out_err; + } + self->cwd = strdup(bf); + if (self->cwd == NULL) { + err = -ENOMEM; + goto out_getcwd_err; + } + self->cwdlen = strlen(self->cwd); + } + + err = __perf_session__process_events(self, self->header.data_offset, + self->header.data_size, + self->size, ops); +out_err: + return err; +} + bool perf_session__has_traces(struct perf_session *self, const char *msg) { if (!(self->sample_type & PERF_SAMPLE_RAW)) { diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 752d75aebade..31950fcd8a4d 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -50,6 +50,9 @@ void perf_session__delete(struct perf_session *self); void perf_event_header__bswap(struct perf_event_header *self); +int __perf_session__process_events(struct perf_session *self, + u64 data_offset, u64 data_size, u64 size, + struct perf_event_ops *ops); int perf_session__process_events(struct perf_session *self, struct perf_event_ops *event_ops); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e752837363ee..bfb055459670 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1076,25 +1076,28 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id) return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; } -static bool __dsos__read_build_ids(struct list_head *head) +static bool __dsos__read_build_ids(struct list_head *head, bool with_hits) { bool have_build_id = false; struct dso *pos; - list_for_each_entry(pos, head, node) + list_for_each_entry(pos, head, node) { + if (with_hits && !pos->hit) + continue; if (filename__read_build_id(pos->long_name, pos->build_id, sizeof(pos->build_id)) > 0) { have_build_id = true; pos->has_build_id = true; } + } return have_build_id; } -bool dsos__read_build_ids(void) +bool dsos__read_build_ids(bool with_hits) { - bool kbuildids = __dsos__read_build_ids(&dsos__kernel), - ubuildids = __dsos__read_build_ids(&dsos__user); + bool kbuildids = __dsos__read_build_ids(&dsos__kernel, with_hits), + ubuildids = __dsos__read_build_ids(&dsos__user, with_hits); return kbuildids || ubuildids; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index e90568a9e467..1b4192ee5300 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -157,7 +157,7 @@ struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); -bool dsos__read_build_ids(void); +bool dsos__read_build_ids(bool with_hits); int build_id__sprintf(const u8 *self, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, -- cgit 
v1.2.3-58-ga151 From 29a9f66d703cb9464e24084e09edab5683e1b6b8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Feb 2010 16:52:06 -0200 Subject: perf tools: Adjust some verbosity levels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not to pollute too much 'perf annotate' debugging sessions. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-7-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 33 ++++++++++++--------------------- tools/perf/util/include/linux/kernel.h | 1 + tools/perf/util/symbol.c | 9 +++++---- 3 files changed, 18 insertions(+), 25 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 73c202ee0882..4fc3899bf83a 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -97,9 +97,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) sym_size = sym->end - sym->start; offset = ip - sym->start; - if (verbose) - fprintf(stderr, "%s: ip=%Lx\n", __func__, - he->map->unmap_ip(he->map, ip)); + pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip)); if (offset >= sym_size) return; @@ -108,12 +106,8 @@ static void hist_hit(struct hist_entry *he, u64 ip) h->sum++; h->ip[offset]++; - if (verbose >= 3) - printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", - (void *)(unsigned long)he->sym->start, - he->sym->name, - (void *)(unsigned long)ip, ip - he->sym->start, - h->ip[offset]); + pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start, + he->sym->name, ip, ip - he->sym->start, h->ip[offset]); } static int perf_session__add_hist_entry(struct perf_session *self, @@ -136,14 +130,14 @@ static int process_sample_event(event_t *event, struct perf_session *session) event->ip.pid, event->ip.ip); if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) { - fprintf(stderr, "problem processing %d event, skipping it.\n", - event->header.type); + pr_warning("problem processing %d event, skipping it.\n", + event->header.type); return -1; } if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) { - fprintf(stderr, "problem incrementing symbol count, " - "skipping event\n"); + pr_warning("problem incrementing symbol count, " + "skipping event\n"); return -1; } @@ -378,11 +372,9 @@ static void annotate_sym(struct hist_entry *he) if (!filename) return; - if (verbose) - fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n", - __func__, filename, sym->name, - map->unmap_ip(map, sym->start), - map->unmap_ip(map, sym->end)); + pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__, + filename, sym->name, map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end)); if (full_paths) d_filename = filename; @@ -542,9 +534,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) setup_pager(); if (field_sep && *field_sep == '.') { - fputs("'.' is the only non valid --field-separator argument\n", - stderr); - exit(129); + pr_err("'.' 
is the only non valid --field-separator argument\n"); + return -1; } return __cmd_annotate(); diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index 21c0274c02fa..f2611655ab51 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -101,5 +101,6 @@ simple_strtoul(const char *nptr, char **endptr, int base) eprintf(n, pr_fmt(fmt), ##__VA_ARGS__) #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__) #endif diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index bfb055459670..a60ba2ba1044 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -137,7 +137,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name) self->start = start; self->end = len ? start + len - 1 : start; - pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); + pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); memcpy(self->name, name, namelen); @@ -1024,9 +1024,10 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } if (curr_dso->adjust_symbols) { - pr_debug2("adjusting symbol: st_value: %Lx sh_addr: " - "%Lx sh_offset: %Lx\n", (u64)sym.st_value, - (u64)shdr.sh_addr, (u64)shdr.sh_offset); + pr_debug4("%s: adjusting symbol: st_value: %#Lx " + "sh_addr: %#Lx sh_offset: %#Lx\n", __func__, + (u64)sym.st_value, (u64)shdr.sh_addr, + (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } /* -- cgit v1.2.3-58-ga151 From 7a2b6209863626cf8362e5ff4653491558f91e67 Mon Sep 17 00:00:00 2001 From: Kirill Smelkov Date: Wed, 3 Feb 2010 16:52:07 -0200 Subject: perf annotate: Fix it for non-prelinked *.so The problem was we were incorrectly calculating objdump addresses for sym->start and sym->end, look: For simple ET_DYN type DSO (*.so) with one function, objdump -dS output is something like this: 000004ac : int my_strlen(const char *s) 4ac: 55 push %ebp 4ad: 89 e5 mov %esp,%ebp 4af: 83 ec 10 sub $0x10,%esp { i.e. we have relative-to-dso-mapping IPs (=RIP) there. For ET_EXEC type and probably for prelinked libs as well (sorry can't test - I don't use prelink) objdump outputs absolute IPs, e.g. 08048604 : extern "C" int zz_strlen(const char *s) 8048604: 55 push %ebp 8048605: 89 e5 mov %esp,%ebp 8048607: 83 ec 10 sub $0x10,%esp { So, if sym->start is always relative to dso mapping(*), we'll have to unmap it for ET_EXEC like cases, and leave as is for ET_DYN cases. (*) and it is - we've explicitely made it relative. Look for adjust_symbols handling in dso__load_sym() Previously we were always unmapping sym->start and for ET_DYN dsos resulting addresses were wrong, and so objdump output was empty. The end result was that perf annotate output for symbols from non-prelinked *.so had always 0.00% percents only, which is wrong. To fix it, let's introduce a helper for converting rip to objdump address, and also let's document what map_ip() and unmap_ip() do -- I had to study sources for several hours to understand it. 
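A minimal standalone sketch of that translation, using the addresses from the two
objdump listings above; the toy_* names stand in for perf's map_ip()/unmap_ip()
and map__rip_2objdump() and are not the actual implementation:

#include <stdio.h>
#include <stdint.h>

struct toy_map {
	uint64_t start;		/* where the DSO is mapped */
	uint64_t pgoff;		/* file offset of the mapping */
	int	 adjust_symbols;	/* set for ET_EXEC-like symtabs */
};

/* ip -> dso-relative rip */
static uint64_t toy_map_ip(const struct toy_map *m, uint64_t ip)
{
	return ip - m->start + m->pgoff;
}

/* dso-relative rip -> ip */
static uint64_t toy_unmap_ip(const struct toy_map *m, uint64_t rip)
{
	return rip - m->pgoff + m->start;
}

/* rip -> address suitable for objdump --start-address= */
static uint64_t toy_rip_2objdump(const struct toy_map *m, uint64_t rip)
{
	return m->adjust_symbols ? toy_unmap_ip(m, rip) : rip;
}

int main(void)
{
	struct toy_map so  = { 0xb7700000, 0, 0 };	/* ET_DYN *.so */
	struct toy_map exe = { 0x08048000, 0, 1 };	/* ET_EXEC */
	uint64_t rip;

	/* non-prelinked *.so: objdump wants the dso-relative address, 0x4ac */
	printf("%#llx\n", (unsigned long long)toy_rip_2objdump(&so, 0x4ac));

	/* ET_EXEC: sym->start was made relative, 0x8048604 -> 0x604 ... */
	rip = toy_map_ip(&exe, 0x8048604);
	/* ... and objdump wants the absolute address back: 0x8048604 */
	printf("%#llx\n", (unsigned long long)toy_rip_2objdump(&exe, rip));
	return 0;
}
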
Signed-off-by: Kirill Smelkov Signed-off-by: Arnaldo Carvalho de Melo Cc: Mike Galbraith LKML-Reference: <1265223128-11786-8-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 5 +++-- tools/perf/util/map.c | 12 ++++++++++++ tools/perf/util/map.h | 9 +++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 4fc3899bf83a..28ea4e0c3658 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -189,7 +189,7 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len) line_ip = -1; } - start = he->map->unmap_ip(he->map, sym->start); + start = map__rip_2objdump(he->map, sym->start); if (line_ip != -1) { const char *path = NULL; @@ -397,7 +397,8 @@ static void annotate_sym(struct hist_entry *he) dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end), + map__rip_2objdump(map, sym->start), + map__rip_2objdump(map, sym->end), filename, filename); if (verbose >= 3) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index f6626cc3df2e..af5805f51314 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -210,3 +210,15 @@ size_t map__fprintf(struct map *self, FILE *fp) return fprintf(fp, " %Lx-%Lx %Lx %s\n", self->start, self->end, self->pgoff, self->dso->name); } + +/* + * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. + * map->dso->adjust_symbols==1 for ET_EXEC-like cases. + */ +u64 map__rip_2objdump(struct map *map, u64 rip) +{ + u64 addr = map->dso->adjust_symbols ? + map->unmap_ip(map, rip) : /* RIP -> IP */ + rip; + return addr; +} diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index de048399d776..9cee9c788dbf 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -26,8 +26,12 @@ struct map { u64 end; enum map_type type; u64 pgoff; + + /* ip -> dso rip */ u64 (*map_ip)(struct map *, u64); + /* dso rip -> ip */ u64 (*unmap_ip)(struct map *, u64); + struct dso *dso; }; @@ -56,6 +60,11 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) return ip; } + +/* rip -> addr suitable for passing to `objdump --start-address=` */ +u64 map__rip_2objdump(struct map *map, u64 rip); + + struct symbol; struct mmap_event; -- cgit v1.2.3-58-ga151 From 6cff0e8dbaa4d5d822a814e5028683d7e71c3291 Mon Sep 17 00:00:00 2001 From: Kirill Smelkov Date: Wed, 3 Feb 2010 16:52:08 -0200 Subject: perf top: Teach it to autolocate vmlinux By relying on logic in dso__load_kernel_sym(), we can automatically load vmlinux. The only thing which needs to be adjusted, is how --sym-annotate option is handled - now we can't rely on vmlinux been loaded until full successful pass of dso__load_vmlinux(), but that's not the case if we'll do sym_filter_entry setup in symbol_filter(). So move this step right after event__process_sample() where we know the whole dso__load_kernel_sym() pass is done. By the way, though conceptually similar `perf top` still can't annotate userspace - see next patches with fixes. 
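The ordering constraint can be sketched standalone like this; the variable and
function names are illustrative, not the builtin-top code:

#include <stdio.h>
#include <stddef.h>

struct sym_entry { const char *name; };

static struct sym_entry *filter_entry;		/* active annotation target */
static struct sym_entry *filter_entry_sched;	/* set while symbols load */

/* runs from the symbol filter, i.e. possibly in the middle of loading */
static void schedule_annotation(struct sym_entry *syme)
{
	filter_entry_sched = syme;
}

/* runs per sample, i.e. only after the kernel symbol pass has finished */
static void process_sample(void)
{
	if (filter_entry_sched != NULL) {
		filter_entry = filter_entry_sched;
		filter_entry_sched = NULL;
		printf("parse source for %s\n", filter_entry->name);
	}
}

int main(void)
{
	struct sym_entry e = { "vread_hpet" };

	schedule_annotation(&e);	/* too early to parse source here */
	process_sample();		/* safe point: loading is done */
	return 0;
}
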
Signed-off-by: Kirill Smelkov Signed-off-by: Arnaldo Carvalho de Melo Cc: Mike Galbraith LKML-Reference: <1265223128-11786-9-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-top.txt | 2 +- tools/perf/builtin-top.c | 39 +++++++++++++++++++++-------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 4a7d558dc309..785b9fc32a46 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -74,7 +74,7 @@ OPTIONS -s :: --sym-annotate=:: - Annotate this symbol. Requires -k option. + Annotate this symbol. -v:: --verbose:: diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1fc018e048e1..83c09c8f28ed 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -94,6 +94,7 @@ struct source_line { static char *sym_filter = NULL; struct sym_entry *sym_filter_entry = NULL; +struct sym_entry *sym_filter_entry_sched = NULL; static int sym_pcnt_filter = 5; static int sym_counter = 0; static int display_weighted = -1; @@ -695,11 +696,9 @@ static void print_mapped_keys(void) fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); - if (symbol_conf.vmlinux_name) { - fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); - fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); - fprintf(stdout, "\t[S] stop annotation.\n"); - } + fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); + fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); + fprintf(stdout, "\t[S] stop annotation.\n"); if (nr_counters > 1) fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); @@ -725,14 +724,13 @@ static int key_mapped(int c) case 'Q': case 'K': case 'U': + case 'F': + case 's': + case 'S': return 1; case 'E': case 'w': return nr_counters > 1 ? 1 : 0; - case 'F': - case 's': - case 'S': - return symbol_conf.vmlinux_name ? 
1 : 0; default: break; } @@ -910,8 +908,12 @@ static int symbol_filter(struct map *map, struct symbol *sym) syme = symbol__priv(sym); syme->map = map; syme->src = NULL; - if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) - sym_filter_entry = syme; + + if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) { + /* schedule initial sym_filter_entry setup */ + sym_filter_entry_sched = syme; + sym_filter = NULL; + } for (i = 0; skip_symbols[i]; i++) { if (!strcmp(skip_symbols[i], name)) { @@ -976,6 +978,13 @@ static void event__process_sample(const event_t *self, return; } + /* let's see, whether we need to install initial sym_filter_entry */ + if (sym_filter_entry_sched) { + sym_filter_entry = sym_filter_entry_sched; + sym_filter_entry_sched = NULL; + parse_source(sym_filter_entry); + } + syme = symbol__priv(al.sym); if (!syme->skip) { syme->count[counter]++; @@ -1270,7 +1279,7 @@ static const struct option options[] = { OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name", - "symbol to annotate - requires -k option"), + "symbol to annotate"), OPT_BOOLEAN('z', "zero", &zero, "zero history across updates"), OPT_INTEGER('F', "freq", &freq, @@ -1306,16 +1315,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) symbol_conf.priv_size = (sizeof(struct sym_entry) + (nr_counters + 1) * sizeof(unsigned long)); - if (symbol_conf.vmlinux_name == NULL) - symbol_conf.try_vmlinux_path = true; + + symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); if (symbol__init() < 0) return -1; if (delay_secs < 1) delay_secs = 1; - parse_source(sym_filter_entry); - /* * User specified count overrides default frequency. */ -- cgit v1.2.3-58-ga151 From 57d818895f9d294ab9080e5a662675fdee943ff1 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Thu, 4 Feb 2010 07:31:46 +0100 Subject: perf annotate: Fix perf top module symbol annotation Signed-off-by: Mike Galbraith Cc: Kirill Smelkov Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <1265265106.6364.5.camel@marge.simson.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 83c09c8f28ed..e4156bc4566d 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -204,8 +204,8 @@ static void parse_source(struct sym_entry *syme) sprintf(command, "objdump --start-address=0x%016Lx " "--stop-address=0x%016Lx -dS %s", - map->unmap_ip(map, sym->start), - map->unmap_ip(map, sym->end), path); + map__rip_2objdump(map, sym->start), + map__rip_2objdump(map, sym->end), path); file = popen(command, "r"); if (!file) -- cgit v1.2.3-58-ga151 From 615d0ebbc782b67296e3226c293f520f93f93515 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 2 Feb 2010 16:49:04 -0500 Subject: kprobes: Disable booster when CONFIG_PREEMPT=y Disable kprobe booster when CONFIG_PREEMPT=y at this time, because it can't ensure that all kernel threads preempted on kprobe's boosted slot run out from the slot even using freeze_processes(). The booster on preemptive kernel will be resumed if synchronize_tasks() or something like that is introduced. 
Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Ananth N Mavinakayanahalli Cc: Frederic Weisbecker Cc: Jim Keniston Cc: Mathieu Desnoyers Cc: Steven Rostedt LKML-Reference: <20100202214904.4694.24330.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/ia64/kernel/kprobes.c | 2 +- arch/x86/kernel/kprobes.c | 2 +- kernel/kprobes.c | 29 ++--------------------------- 3 files changed, 4 insertions(+), 29 deletions(-) diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 9adac441ac9b..7026b29e277a 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; ss_probe: -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) +#if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 5b8c7505b3bc..9453815138fa 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -429,7 +429,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) +#if !defined(CONFIG_PREEMPT) if (p->ainsn.boostable == 1 && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b7df302a0204..9907a03c29f6 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -124,30 +124,6 @@ static LIST_HEAD(kprobe_insn_pages); static int kprobe_garbage_slots; static int collect_garbage_slots(void); -static int __kprobes check_safety(void) -{ - int ret = 0; -#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER) - ret = freeze_processes(); - if (ret == 0) { - struct task_struct *p, *q; - do_each_thread(p, q) { - if (p != current && p->state == TASK_RUNNING && - p->pid != 0) { - printk("Check failed: %s is running\n",p->comm); - ret = -1; - goto loop_end; - } - } while_each_thread(p, q); - } -loop_end: - thaw_processes(); -#else - synchronize_sched(); -#endif - return ret; -} - /** * __get_insn_slot() - Find a slot on an executable page for an instruction. * We allocate an executable page if there's no room on existing ones. @@ -235,9 +211,8 @@ static int __kprobes collect_garbage_slots(void) { struct kprobe_insn_page *kip, *next; - /* Ensure no-one is preepmted on the garbages */ - if (check_safety()) - return -EAGAIN; + /* Ensure no-one is interrupted on the garbages */ + synchronize_sched(); list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) { int i; -- cgit v1.2.3-58-ga151 From 2cfa19780d61740f65790c5bae363b759d7c96fa Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 2 Feb 2010 16:49:11 -0500 Subject: ftrace/alternatives: Introducing *_text_reserved functions Introducing *_text_reserved functions for checking the text address range is partially reserved or not. This patch provides checking routines for x86 smp alternatives and dynamic ftrace. Since both functions modify fixed pieces of kernel text, they should reserve and protect those from other dynamic text modifier, like kprobes. This will also be extended when introducing other subsystems which modify fixed pieces of kernel text. Dynamic text modifiers should avoid those. 
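Both reservation helpers reduce to an interval-overlap test over the code locations a subsystem owns (smp-alternatives lock sites, ftrace mcount records). The sketch below shows only that overlap test in isolation; struct reserved_text and text_range_reserved() are hypothetical names used for illustration and do not exist in the kernel.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical record of a reserved code location. */
struct reserved_text {
	uintptr_t addr;
	size_t    len;
};

/* Return 1 if [start, end] intersects any reserved region. */
int text_range_reserved(const struct reserved_text *res, size_t n,
			uintptr_t start, uintptr_t end)
{
	size_t i;

	for (i = 0; i < n; i++) {
		/* overlap: the region begins at or before 'end' and
		 * finishes after 'start', the same shape as the check
		 * in ftrace_text_reserved() above */
		if (res[i].addr <= end && res[i].addr + res[i].len > start)
			return 1;
	}
	return 0;
}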
Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: przemyslaw@pawelczyk.it Cc: Frederic Weisbecker Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Mathieu Desnoyers Cc: Jason Baron LKML-Reference: <20100202214911.4694.16587.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/alternative.h | 5 +++++ arch/x86/kernel/alternative.c | 16 ++++++++++++++++ include/linux/ftrace.h | 6 ++++++ kernel/trace/ftrace.c | 15 +++++++++++++++ 4 files changed, 42 insertions(+) diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 69b74a7b877f..ac80b7d70014 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name, void *text, void *text_end); extern void alternatives_smp_module_del(struct module *mod); extern void alternatives_smp_switch(int smp); +extern int alternatives_text_reserved(void *start, void *end); #else static inline void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) {} static inline void alternatives_smp_module_del(struct module *mod) {} static inline void alternatives_smp_switch(int smp) {} +static inline int alternatives_text_reserved(void *start, void *end) +{ + return 0; +} #endif /* CONFIG_SMP */ /* alternative assembly primitive: */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index de7353c0ce9c..3c13284ff86d 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -390,6 +390,22 @@ void alternatives_smp_switch(int smp) mutex_unlock(&smp_alt); } +/* Return 1 if the address range is reserved for smp-alternatives */ +int alternatives_text_reserved(void *start, void *end) +{ + struct smp_alt_module *mod; + u8 **ptr; + + list_for_each_entry(mod, &smp_alt_modules, next) { + if (mod->text > end || mod->text_end < start) + continue; + for (ptr = mod->locks; ptr < mod->locks_end; ptr++) + if (start <= *ptr && end >= *ptr) + return 1; + } + + return 0; +} #endif #ifdef CONFIG_PARAVIRT diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 0b4f97d24d7f..9d127efed43c 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -134,6 +134,8 @@ extern void unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); extern void unregister_ftrace_function_probe_all(char *glob); +extern int ftrace_text_reserved(void *start, void *end); + enum { FTRACE_FL_FREE = (1 << 0), FTRACE_FL_FAILED = (1 << 1), @@ -250,6 +252,10 @@ static inline int unregister_ftrace_command(char *cmd_name) { return -EINVAL; } +static inline int ftrace_text_reserved(void *start, void *end) +{ + return 0; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1e6640f80454..3d90661a5f40 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1025,6 +1025,21 @@ static void ftrace_bug(int failed, unsigned long ip) } +/* Return 1 if the address range is reserved for ftrace */ +int ftrace_text_reserved(void *start, void *end) +{ + struct dyn_ftrace *rec; + struct ftrace_page *pg; + + do_for_each_ftrace_rec(pg, rec) { + if (rec->ip <= (unsigned long)end && + rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start) + return 1; + } while_for_each_ftrace_rec(); + return 0; +} + + static int __ftrace_replace_code(struct dyn_ftrace *rec, int 
enable) { -- cgit v1.2.3-58-ga151 From 4554dbcb85a4ed2abaa2b6fa15649b796699ec89 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 2 Feb 2010 16:49:18 -0500 Subject: kprobes: Check probe address is reserved Check whether the address of new probe is already reserved by ftrace or alternatives (on x86) when registering new probe. If reserved, it returns an error and not register the probe. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: przemyslaw@pawelczyk.it Cc: Frederic Weisbecker Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Mathieu Desnoyers Cc: Jason Baron LKML-Reference: <20100202214918.4694.94179.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 3 +++ kernel/kprobes.c | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 9453815138fa..5de9f4a9c3fd 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -337,6 +337,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) int __kprobes arch_prepare_kprobe(struct kprobe *p) { + if (alternatives_text_reserved(p->addr, p->addr)) + return -EINVAL; + if (!can_probe((unsigned long)p->addr)) return -EILSEQ; /* insn: must be on special executable page on x86. */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 9907a03c29f6..c3340e836c37 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -703,7 +704,8 @@ int __kprobes register_kprobe(struct kprobe *p) preempt_disable(); if (!kernel_text_address((unsigned long) p->addr) || - in_kprobes_functions((unsigned long) p->addr)) { + in_kprobes_functions((unsigned long) p->addr) || + ftrace_text_reserved(p->addr, p->addr)) { preempt_enable(); return -EINVAL; } -- cgit v1.2.3-58-ga151 From f24bb999d2b9f2950e5cac5b69bffedf73c24ea4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 2 Feb 2010 16:49:25 -0500 Subject: ftrace: Remove record freezing Remove record freezing. Because kprobes never puts probe on ftrace's mcount call anymore, it doesn't need ftrace to check whether kprobes on it. 
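With the register-time check above in place, a probe that lands on text owned by ftrace or smp-alternatives is refused immediately rather than being worked around later. The following is a hypothetical out-of-tree module sketch of what a caller would see; "some_traced_function" is a placeholder symbol name and the exact error code depends on why the address was rejected.

#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "some_traced_function",  /* placeholder */
};

static int __init probe_init(void)
{
	int ret = register_kprobe(&kp);

	/* Addresses inside reserved text now fail here, e.g. -EINVAL,
	 * instead of relying on ftrace record freezing afterwards. */
	if (ret < 0)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");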
Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: przemyslaw@pawelczyk.it Cc: Frederic Weisbecker LKML-Reference: <20100202214925.4694.73469.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace.h | 1 - kernel/trace/ftrace.c | 39 --------------------------------------- 2 files changed, 40 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9d127efed43c..eb054ae95605 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -143,7 +143,6 @@ enum { FTRACE_FL_ENABLED = (1 << 3), FTRACE_FL_NOTRACE = (1 << 4), FTRACE_FL_CONVERTED = (1 << 5), - FTRACE_FL_FROZEN = (1 << 6), }; struct dyn_ftrace { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3d90661a5f40..1904797f4a8a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records; } \ } -#ifdef CONFIG_KPROBES - -static int frozen_record_count; - -static inline void freeze_record(struct dyn_ftrace *rec) -{ - if (!(rec->flags & FTRACE_FL_FROZEN)) { - rec->flags |= FTRACE_FL_FROZEN; - frozen_record_count++; - } -} - -static inline void unfreeze_record(struct dyn_ftrace *rec) -{ - if (rec->flags & FTRACE_FL_FROZEN) { - rec->flags &= ~FTRACE_FL_FROZEN; - frozen_record_count--; - } -} - -static inline int record_frozen(struct dyn_ftrace *rec) -{ - return rec->flags & FTRACE_FL_FROZEN; -} -#else -# define freeze_record(rec) ({ 0; }) -# define unfreeze_record(rec) ({ 0; }) -# define record_frozen(rec) ({ 0; }) -#endif /* CONFIG_KPROBES */ - static void ftrace_free_rec(struct dyn_ftrace *rec) { rec->freelist = ftrace_free_records; @@ -1091,14 +1060,6 @@ static void ftrace_replace_code(int enable) !(rec->flags & FTRACE_FL_CONVERTED)) continue; - /* ignore updates to this record's mcount site */ - if (get_kprobe((void *)rec->ip)) { - freeze_record(rec); - continue; - } else { - unfreeze_record(rec); - } - failed = __ftrace_replace_code(rec, enable); if (failed) { rec->flags |= FTRACE_FL_FAILED; -- cgit v1.2.3-58-ga151 From 9717e6cd3db22eade7dbae0fc9235c66325a7132 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jan 2010 13:57:44 +0100 Subject: perf_events: Optimize perf_event_task_tick() Pretty much all of the calls do perf_disable/perf_enable cycles, pull that out to cut back on hardware programming. 
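The tick change is an instance of a general pattern: hoist an expensive disable/enable bracket out of the callees and pay for it once per tick. Below is a toy user-space sketch of the effect; pmu_disable()/pmu_enable() and the step functions are stand-ins that merely count hardware toggles, not the real kernel interfaces.

#include <stdio.h>

static int hw_toggles;

static void pmu_disable(void) { hw_toggles++; }
static void pmu_enable(void)  { hw_toggles++; }

static void adjust_freq(void) { }
static void rotate(void)      { }
static void resched(void)     { }

int main(void)
{
	/* unbatched: every step pays for its own disable/enable pair */
	pmu_disable(); adjust_freq(); pmu_enable();
	pmu_disable(); rotate();      pmu_enable();
	pmu_disable(); resched();     pmu_enable();
	printf("unbatched toggles: %d\n", hw_toggles);  /* 6 */

	/* batched, as in the patch: one pair around all the work */
	hw_toggles = 0;
	pmu_disable();
	adjust_freq();
	rotate();
	resched();
	pmu_enable();
	printf("batched toggles:   %d\n", hw_toggles);  /* 2 */
	return 0;
}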
Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 40f8b07c5601..087025fe3ba1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1573,12 +1573,8 @@ static void rotate_ctx(struct perf_event_context *ctx) raw_spin_lock(&ctx->lock); /* Rotate the first entry last of non-pinned groups */ - perf_disable(); - list_rotate_left(&ctx->flexible_groups); - perf_enable(); - raw_spin_unlock(&ctx->lock); } @@ -1593,6 +1589,8 @@ void perf_event_task_tick(struct task_struct *curr) cpuctx = &__get_cpu_var(perf_cpu_context); ctx = curr->perf_event_ctxp; + perf_disable(); + perf_ctx_adjust_freq(&cpuctx->ctx); if (ctx) perf_ctx_adjust_freq(ctx); @@ -1608,6 +1606,8 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_in(curr, EVENT_FLEXIBLE); + + perf_enable(); } static int event_enable_on_exec(struct perf_event *event, -- cgit v1.2.3-58-ga151 From 8c48e444191de0ff84e85d41180d7bc3e74f14ef Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 Jan 2010 13:25:31 +0100 Subject: perf_events, x86: Implement intel core solo/duo support Implement Intel Core Solo/Duo, aka. Intel Architectural Performance Monitoring Version 1. Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Arjan van de Ven LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 133 ++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 72 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1846ead0576b..5b91992b6b25 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -227,6 +227,17 @@ static const u64 intel_perfmon_event_map[] = }; static struct event_constraint intel_core_event_constraints[] = +{ + INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ + INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ + INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint intel_core2_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ @@ -1216,7 +1227,7 @@ static void intel_pmu_disable_all(void) intel_pmu_disable_bts(); } -static void amd_pmu_disable_all(void) +static void x86_pmu_disable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; @@ -1226,11 +1237,11 @@ static void amd_pmu_disable_all(void) if (!test_bit(idx, cpuc->active_mask)) continue; - rdmsrl(MSR_K7_EVNTSEL0 + idx, val); + rdmsrl(x86_pmu.eventsel + idx, val); if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) continue; val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + wrmsrl(x86_pmu.eventsel + idx, val); } } @@ -1278,7 +1289,7 @@ static void intel_pmu_enable_all(void) } } -static void amd_pmu_enable_all(void) +static void x86_pmu_enable_all(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; @@ -1292,7 +1303,7 @@ static void 
amd_pmu_enable_all(void) val = event->hw.config; val |= ARCH_PERFMON_EVENTSEL0_ENABLE; - wrmsrl(MSR_K7_EVNTSEL0 + idx, val); + wrmsrl(x86_pmu.eventsel + idx, val); } } @@ -1546,7 +1557,7 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) { (void)checking_wrmsrl(hwc->config_base + idx, hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); @@ -1598,12 +1609,6 @@ intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) x86_pmu_disable_event(hwc, idx); } -static inline void -amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) -{ - x86_pmu_disable_event(hwc, idx); -} - static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); /* @@ -1723,15 +1728,14 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) return; } - x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(hwc, idx); } -static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - if (cpuc->enabled) - x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(hwc, idx); } /* @@ -1988,50 +1992,6 @@ static void intel_pmu_reset(void) local_irq_restore(flags); } -static int p6_pmu_handle_irq(struct pt_regs *regs) -{ - struct perf_sample_data data; - struct cpu_hw_events *cpuc; - struct perf_event *event; - struct hw_perf_event *hwc; - int idx, handled = 0; - u64 val; - - data.addr = 0; - data.raw = NULL; - - cpuc = &__get_cpu_var(cpu_hw_events); - - for (idx = 0; idx < x86_pmu.num_events; idx++) { - if (!test_bit(idx, cpuc->active_mask)) - continue; - - event = cpuc->events[idx]; - hwc = &event->hw; - - val = x86_perf_event_update(event, hwc, idx); - if (val & (1ULL << (x86_pmu.event_bits - 1))) - continue; - - /* - * event overflow - */ - handled = 1; - data.period = event->hw.last_period; - - if (!x86_perf_event_set_period(event, hwc, idx)) - continue; - - if (perf_event_overflow(event, 1, &data, regs)) - p6_pmu_disable_event(hwc, idx); - } - - if (handled) - inc_irq_stat(apic_perf_irqs); - - return handled; -} - /* * This handler is triggered by the local APIC, so the APIC IRQ handling * rules apply: @@ -2098,7 +2058,7 @@ again: return 1; } -static int amd_pmu_handle_irq(struct pt_regs *regs) +static int x86_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; struct cpu_hw_events *cpuc; @@ -2133,7 +2093,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - amd_pmu_disable_event(hwc, idx); + x86_pmu.disable(hwc, idx); } if (handled) @@ -2374,7 +2334,7 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { static __initconst struct x86_pmu p6_pmu = { .name = "p6", - .handle_irq = p6_pmu_handle_irq, + .handle_irq = x86_pmu_handle_irq, .disable_all = p6_pmu_disable_all, .enable_all = p6_pmu_enable_all, .enable = p6_pmu_enable_event, @@ -2401,6 +2361,29 @@ static __initconst struct x86_pmu p6_pmu = { .event_constraints = intel_p6_event_constraints }; +static __initconst struct x86_pmu core_pmu = { + .name = "core", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, + .perfctr = MSR_ARCH_PERFMON_PERFCTR0, + 
.event_map = intel_pmu_event_map, + .raw_event = intel_pmu_raw_event, + .max_events = ARRAY_SIZE(intel_perfmon_event_map), + .apic = 1, + /* + * Intel PMCs cannot be accessed sanely above 32 bit width, + * so we install an artificial 1<<31 period regardless of + * the generic event period: + */ + .max_period = (1ULL << 31) - 1, + .get_event_constraints = intel_get_event_constraints, + .event_constraints = intel_core_event_constraints, +}; + static __initconst struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -2427,11 +2410,11 @@ static __initconst struct x86_pmu intel_pmu = { static __initconst struct x86_pmu amd_pmu = { .name = "AMD", - .handle_irq = amd_pmu_handle_irq, - .disable_all = amd_pmu_disable_all, - .enable_all = amd_pmu_enable_all, - .enable = amd_pmu_enable_event, - .disable = amd_pmu_disable_event, + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .event_map = amd_pmu_event_map, @@ -2498,9 +2481,10 @@ static __init int intel_pmu_init(void) version = eax.split.version_id; if (version < 2) - return -ENODEV; + x86_pmu = core_pmu; + else + x86_pmu = intel_pmu; - x86_pmu = intel_pmu; x86_pmu.version = version; x86_pmu.num_events = eax.split.num_events; x86_pmu.event_bits = eax.split.bit_width; @@ -2510,12 +2494,17 @@ static __init int intel_pmu_init(void) * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events: */ - x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); + if (version > 1) + x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); /* * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_model) { + case 14: /* 65 nm core solo/duo, "Yonah" */ + pr_cont("Core events, "); + break; + case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ @@ -2523,7 +2512,7 @@ static __init int intel_pmu_init(void) memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = intel_core_event_constraints; + x86_pmu.event_constraints = intel_core2_event_constraints; pr_cont("Core2 events, "); break; -- cgit v1.2.3-58-ga151 From fce877e3a429940a986e085a41e8b57f2d922e36 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 29 Jan 2010 13:25:12 +0100 Subject: bitops: Ensure the compile time HWEIGHT is only used for such Avoid accidental misuse by failing to compile things Suggested-by: Andrew Morton Signed-off-by: Peter Zijlstra Cc: Linus Torvalds LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 10 +++++++--- include/linux/bitops.h | 33 ++++++++++++++++++++++----------- 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5b91992b6b25..96cfc1a4fe9f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -93,13 +93,16 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ }; -#define EVENT_CONSTRAINT(c, n, m) { \ +#define __EVENT_CONSTRAINT(c, n, m, w) {\ { .idxmsk64[0] = (n) }, \ .code = (c), \ .cmask = (m), \ - .weight = HWEIGHT64((u64)(n)), \ + .weight = (w), \ } +#define EVENT_CONSTRAINT(c, n, m) \ + 
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) + #define INTEL_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) @@ -2622,7 +2625,8 @@ void __init init_hw_perf_events(void) register_die_notifier(&perf_event_nmi_notifier); unconstrained = (struct event_constraint) - EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0); + __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, + 0, x86_pmu.num_events); pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.event_bits); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index ba0fd1eb4af7..25b8b2f33ae9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -45,19 +45,30 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } -#define HWEIGHT8(w) \ - ( (!!((w) & (1ULL << 0))) + \ - (!!((w) & (1ULL << 1))) + \ - (!!((w) & (1ULL << 2))) + \ - (!!((w) & (1ULL << 3))) + \ - (!!((w) & (1ULL << 4))) + \ - (!!((w) & (1ULL << 5))) + \ - (!!((w) & (1ULL << 6))) + \ +/* + * Clearly slow versions of the hweightN() functions, their benefit is + * of course compile time evaluation of constant arguments. + */ +#define HWEIGHT8(w) \ + ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \ + (!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ (!!((w) & (1ULL << 7))) ) -#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8(w >> 8)) -#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16(w >> 16)) -#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32(w >> 32)) +#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8)) +#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16)) +#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32)) + +/* + * Type invariant version that simply casts things to the + * largest type. + */ +#define HWEIGHT(w) HWEIGHT64((u64)(w)) /** * rol32 - rotate a 32-bit value left -- cgit v1.2.3-58-ga151 From 447a194b393f32699607fd99617a40abd6a95114 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 1 Feb 2010 14:50:01 +0200 Subject: perf_events, x86: Fix bug in hw_perf_enable() We cannot assume that because hwc->idx == assign[i], we can avoid reprogramming the counter in hw_perf_enable(). The event may have been scheduled out and another event may have been programmed into this counter. Thus, we need a more robust way of verifying if the counter still contains config/data related to an event. This patch adds a generation number to each counter on each cpu. Using this mechanism we can verify reliabilty whether the content of a counter corresponds to an event. 
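The fix works by stamping every counter assignment with the CPU and a per-counter generation number, so "same index as last time" is no longer taken as proof that the counter still holds this event's configuration. A simplified sketch of the tagging scheme follows; the toy_* types are stand-ins for fields that really live in struct hw_perf_event and struct cpu_hw_events.

#include <stdint.h>
#include <stdbool.h>

struct toy_event {
	int      idx;      /* counter used last time, -1 if none */
	int      last_cpu; /* CPU it was programmed on */
	uint64_t last_tag; /* generation of that assignment */
};

struct toy_cpu_counters {
	uint64_t tag[8];   /* bumped whenever a counter is (re)assigned */
};

void toy_assign(struct toy_event *e, struct toy_cpu_counters *c,
		int cpu, int idx)
{
	e->idx      = idx;
	e->last_cpu = cpu;
	e->last_tag = ++c->tag[idx];
}

/* Reprogramming may be skipped only if the same counter, on the same
 * CPU, has not been handed to anyone else since our last assignment. */
bool toy_match_prev(const struct toy_event *e,
		    const struct toy_cpu_counters *c, int cpu, int idx)
{
	return e->idx == idx &&
	       e->last_cpu == cpu &&
	       e->last_tag == c->tag[idx];
}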
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <4b66dc67.0b38560a.1635.ffffae18@mx.google.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 34 ++++++++++++++++++++++++++++------ include/linux/perf_event.h | 2 ++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 96cfc1a4fe9f..a920f173a220 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -90,6 +90,7 @@ struct cpu_hw_events { int n_events; int n_added; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ + u64 tags[X86_PMC_IDX_MAX]; struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ }; @@ -1142,6 +1143,8 @@ static int __hw_perf_event_init(struct perf_event *event) hwc->config = ARCH_PERFMON_EVENTSEL_INT; hwc->idx = -1; + hwc->last_cpu = -1; + hwc->last_tag = ~0ULL; /* * Count user and OS events unless requested not to. @@ -1457,11 +1460,14 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, return n; } - static inline void x86_assign_hw_event(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct cpu_hw_events *cpuc, int i) { - hwc->idx = idx; + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = cpuc->assign[i]; + hwc->last_cpu = smp_processor_id(); + hwc->last_tag = ++cpuc->tags[i]; if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { hwc->config_base = 0; @@ -1480,6 +1486,15 @@ static inline void x86_assign_hw_event(struct perf_event *event, } } +static inline int match_prev_assignment(struct hw_perf_event *hwc, + struct cpu_hw_events *cpuc, + int i) +{ + return hwc->idx == cpuc->assign[i] && + hwc->last_cpu == smp_processor_id() && + hwc->last_tag == cpuc->tags[i]; +} + static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc); void hw_perf_enable(void) @@ -1508,7 +1523,14 @@ void hw_perf_enable(void) event = cpuc->event_list[i]; hwc = &event->hw; - if (hwc->idx == -1 || hwc->idx == cpuc->assign[i]) + /* + * we can avoid reprogramming counter if: + * - assigned same counter as last time + * - running on same CPU as last time + * - no other event has used the counter since + */ + if (hwc->idx == -1 || + match_prev_assignment(hwc, cpuc, i)) continue; __x86_pmu_disable(event, cpuc); @@ -1522,12 +1544,12 @@ void hw_perf_enable(void) hwc = &event->hw; if (hwc->idx == -1) { - x86_assign_hw_event(event, hwc, cpuc->assign[i]); + x86_assign_hw_event(event, cpuc, i); x86_perf_event_set_period(event, hwc, hwc->idx); } /* * need to mark as active because x86_pmu_disable() - * clear active_mask and eventsp[] yet it preserves + * clear active_mask and events[] yet it preserves * idx */ set_bit(hwc->idx, cpuc->active_mask); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 556b0f4a668e..071a7db52549 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -478,9 +478,11 @@ struct hw_perf_event { union { struct { /* hardware */ u64 config; + u64 last_tag; unsigned long config_base; unsigned long event_base; int idx; + int last_cpu; }; struct { /* software */ s64 remaining; -- cgit v1.2.3-58-ga151 From f887f3019e56389a73617f4e70f512e82cc89adb Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 4 Feb 2010 16:46:42 +0800 Subject: perf tools: Clean up O_LARGEFILE et al usage Setting _FILE_OFFSET_BITS and using O_LARGEFILE, lseek64, etc, is redundant. 
Thanks H. Peter Anvin for pointing it out. So, this patch removes O_LARGEFILE, lseek64, etc. Suggested-by: "H. Peter Anvin" Signed-off-by: Xiao Guangrong Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4B6A8972.3070605@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 3 +-- tools/perf/util/header.c | 21 ++++++++++----------- tools/perf/util/session.c | 3 +-- tools/perf/util/trace-event-read.c | 20 ++++++++++---------- 4 files changed, 22 insertions(+), 25 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 706f00196b87..3ad599b12c91 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -5,7 +5,6 @@ * (or a CPU, or a PID) into the perf.data output file - for * later analysis via perf report. */ -#define _LARGEFILE64_SOURCE #define _FILE_OFFSET_BITS 64 #include "builtin.h" @@ -451,7 +450,7 @@ static int __cmd_record(int argc, const char **argv) append_file = 0; } - flags = O_CREAT|O_RDWR|O_LARGEFILE; + flags = O_CREAT|O_RDWR; if (append_file) file_new = 0; else diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d5facd5ab1f7..6c9aa16ee51f 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1,4 +1,3 @@ -#define _LARGEFILE64_SOURCE #define _FILE_OFFSET_BITS 64 #include @@ -388,7 +387,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) sec_size = sizeof(*feat_sec) * nr_sections; sec_start = self->data_offset + self->data_size; - lseek64(fd, sec_start + sec_size, SEEK_SET); + lseek(fd, sec_start + sec_size, SEEK_SET); if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { struct perf_file_section *trace_sec; @@ -396,9 +395,9 @@ static int perf_header__adds_write(struct perf_header *self, int fd) trace_sec = &feat_sec[idx++]; /* Write trace info */ - trace_sec->offset = lseek64(fd, 0, SEEK_CUR); + trace_sec->offset = lseek(fd, 0, SEEK_CUR); read_tracing_data(fd, attrs, nr_counters); - trace_sec->size = lseek64(fd, 0, SEEK_CUR) - trace_sec->offset; + trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; } @@ -408,18 +407,18 @@ static int perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; /* Write build-ids */ - buildid_sec->offset = lseek64(fd, 0, SEEK_CUR); + buildid_sec->offset = lseek(fd, 0, SEEK_CUR); err = dsos__write_buildid_table(fd); if (err < 0) { pr_debug("failed to write buildid table\n"); goto out_free; } - buildid_sec->size = lseek64(fd, 0, SEEK_CUR) - - buildid_sec->offset; + buildid_sec->size = lseek(fd, 0, SEEK_CUR) - + buildid_sec->offset; dsos__cache_build_ids(); } - lseek64(fd, sec_start, SEEK_SET); + lseek(fd, sec_start, SEEK_SET); err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); @@ -513,7 +512,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) pr_debug("failed to write perf header\n"); return err; } - lseek64(fd, self->data_offset + self->data_size, SEEK_SET); + lseek(fd, self->data_offset + self->data_size, SEEK_SET); self->frozen = 1; return 0; @@ -567,7 +566,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, sec_size = sizeof(*feat_sec) * nr_sections; - lseek64(fd, self->data_offset + self->data_size, SEEK_SET); + lseek(fd, self->data_offset + self->data_size, SEEK_SET); if (perf_header__getbuffer64(self, fd, feat_sec, sec_size)) goto out_free; @@ -641,7 +640,7 @@ static int perf_file_section__process(struct perf_file_section *self, 
struct perf_header *ph, int feat, int fd) { - if (lseek64(fd, self->offset, SEEK_SET) < 0) { + if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) { pr_debug("Failed to lseek to %Ld offset for feature %d, " "continuing...\n", self->offset, feat); return 0; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 74cbc64a3a3c..0de7258e70a5 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1,4 +1,3 @@ -#define _LARGEFILE64_SOURCE #define _FILE_OFFSET_BITS 64 #include @@ -15,7 +14,7 @@ static int perf_session__open(struct perf_session *self, bool force) { struct stat input_stat; - self->fd = open(self->filename, O_RDONLY|O_LARGEFILE); + self->fd = open(self->filename, O_RDONLY); if (self->fd < 0) { pr_err("failed to open file: %s", self->filename); if (!strcmp(self->filename, "perf.data")) diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index ca3c26d466f3..7cd1193918c7 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -18,7 +18,7 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 #include #include @@ -83,7 +83,7 @@ static char *read_string(void) char *str = NULL; int size = 0; int i; - s64 r; + off_t r; for (;;) { r = read(input_fd, buf, BUFSIZ); @@ -117,8 +117,8 @@ static char *read_string(void) i++; /* move the file descriptor to the end of the string */ - r = lseek64(input_fd, -(r - i), SEEK_CUR); - if (r < 0) + r = lseek(input_fd, -(r - i), SEEK_CUR); + if (r == (off_t)-1) die("lseek"); if (str) { @@ -282,8 +282,8 @@ static void update_cpu_data_index(int cpu) static void get_next_page(int cpu) { - off64_t save_seek; - off64_t ret; + off_t save_seek; + off_t ret; if (!cpu_data[cpu].page) return; @@ -298,17 +298,17 @@ static void get_next_page(int cpu) update_cpu_data_index(cpu); /* other parts of the code may expect the pointer to not move */ - save_seek = lseek64(input_fd, 0, SEEK_CUR); + save_seek = lseek(input_fd, 0, SEEK_CUR); - ret = lseek64(input_fd, cpu_data[cpu].offset, SEEK_SET); - if (ret < 0) + ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET); + if (ret == (off_t)-1) die("failed to lseek"); ret = read(input_fd, cpu_data[cpu].page, page_size); if (ret < 0) die("failed to read page"); /* reset the file pointer back */ - lseek64(input_fd, save_seek, SEEK_SET); + lseek(input_fd, save_seek, SEEK_SET); return; } -- cgit v1.2.3-58-ga151 From 2161db969313cb94ffd9377a525fb75c3fee9eeb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 4 Feb 2010 10:22:01 +0100 Subject: perf tools: Fix session init on non-modular kernels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit perf top and perf record refuses to initialize on non-modular kernels: refuse to initialize: $ perf top -v map_groups__set_modules_path_dir: cannot open /lib/modules/2.6.33-rc6-tip-00586-g398dde3-dirty/ Cc: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265223128-11786-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a60ba2ba1044..6882e9fec2d6 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1937,7 +1937,7 @@ int map_groups__create_kernel_maps(struct map_groups *self, return -1; if 
(symbol_conf.use_modules && map_groups__create_modules(self) < 0) - return -1; + return 0; /* * Now that we have all the maps created, just set the ->end of them: */ -- cgit v1.2.3-58-ga151 From 5ecaafdbf44b1ba400b746c60c401d54c7ee0863 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 5 Feb 2010 01:24:34 -0500 Subject: kprobes: Add mcount to the kprobes blacklist Since mcount function can be called from everywhere, it should be blacklisted. Moreover, the "mcount" symbol is a special symbol name. So, it is better to put it in the generic blacklist. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Ananth N Mavinakayanahalli Cc: Steven Rostedt LKML-Reference: <20100205062433.3745.36726.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index c3340e836c37..ccec774c716d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -94,6 +94,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = { {"native_get_debugreg",}, {"irq_entries_start",}, {"common_interrupt",}, + {"mcount",}, /* mcount can be called from everywhere */ {NULL} /* Terminator */ }; -- cgit v1.2.3-58-ga151 From ee11b90b12eb1ec25e1044bac861e90bfd19ec9e Mon Sep 17 00:00:00 2001 From: Kirill Smelkov Date: Sun, 7 Feb 2010 11:46:15 -0200 Subject: perf top: Fix annotate for userspace First, for programs and prelinked libraries, annotate code was fooled by objdump output IPs (src->eip in the code) being wrongly converted to absolute IPs. In such case there were no conversion needed, but in src->eip = strtoull(src->line, NULL, 16); src->eip = map->unmap_ip(map, src->eip); // = eip + map->start - map->pgoff we were reading absolute address from objdump (e.g. 8048604) and then almost doubling it, because eip & map->start are approximately close for small programs. Needless to say, that later, in record_precise_ip() there was no matching with real runtime IPs. And second, like with `perf annotate` the problem with non-prelinked *.so was that we were doing rip -> objdump address conversion wrong. Also, because unlike `perf annotate`, `perf top` code does annotation based on absolute IPs for performance reasons(*), new helper for mapping objdump addresse to IP is introduced. (*) we get samples info in absolute IPs, and since we do lots of hit-testing on absolute IPs at runtime in record_precise_ip(), it's better to convert objdump addresses to IPs once and do no conversion at runtime. I also had to fix how objdump output is parsed (with hardcoded 8/16 characters format, which was inappropriate for ET_DYN dsos with small addresses like '4ac') Also note, that not all objdump output lines has associtated IPs, e.g. look at source lines here: 000004ac : extern "C" int my_strlen(const char *s) 4ac: 55 push %ebp 4ad: 89 e5 mov %esp,%ebp 4af: 83 ec 10 sub $0x10,%esp { int len = 0; 4b2: c7 45 fc 00 00 00 00 movl $0x0,-0x4(%ebp) 4b9: eb 08 jmp 4c3 while (*s) { ++len; 4bb: 83 45 fc 01 addl $0x1,-0x4(%ebp) ++s; 4bf: 83 45 08 01 addl $0x1,0x8(%ebp) So we mark them with eip=0, and ignore such lines in annotate lookup code. 
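The parsing side of the fix stops assuming a fixed 8- or 16-character address column and instead lets strtoull() consume whatever hex prefix is present, keeping the value only if a ':' follows. A small standalone sketch of that rule, using the sample objdump lines quoted in the commit message; objdump_line_addr() is a hypothetical helper name.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

/*
 * Parse one line of "objdump -dS" output: return the instruction
 * address, or 0 for lines that carry no address (pure source lines),
 * matching the eip == 0 convention described above.
 */
static uint64_t objdump_line_addr(const char *line)
{
	char *sep;
	uint64_t addr = strtoull(line, &sep, 16);

	return (*sep == ':') ? addr : 0;
}

int main(void)
{
	const char *insn = " 4bb:\t83 45 fc 01 \taddl   $0x1,-0x4(%ebp)";
	const char *src  = "        while (*s) {";

	printf("%" PRIx64 "\n", objdump_line_addr(insn));  /* 4bb */
	printf("%" PRIx64 "\n", objdump_line_addr(src));   /* 0   */
	return 0;
}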
Signed-off-by: Kirill Smelkov [ Note: one hunk of this patch was applied by Mike in 57d8188 ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Mike Galbraith LKML-Reference: <1265550376-12665-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 18 +++++++++--------- tools/perf/util/map.c | 8 ++++++++ tools/perf/util/map.h | 4 ++-- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e4156bc4566d..befa57e2284d 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -216,7 +216,7 @@ static void parse_source(struct sym_entry *syme) while (!feof(file)) { struct source_line *src; size_t dummy = 0; - char *c; + char *c, *sep; src = malloc(sizeof(struct source_line)); assert(src != NULL); @@ -235,14 +235,11 @@ static void parse_source(struct sym_entry *syme) *source->lines_tail = src; source->lines_tail = &src->next; - if (strlen(src->line)>8 && src->line[8] == ':') { - src->eip = strtoull(src->line, NULL, 16); - src->eip = map->unmap_ip(map, src->eip); - } - if (strlen(src->line)>8 && src->line[16] == ':') { - src->eip = strtoull(src->line, NULL, 16); - src->eip = map->unmap_ip(map, src->eip); - } + src->eip = strtoull(src->line, &sep, 16); + if (*sep == ':') + src->eip = map__objdump_2ip(map, src->eip); + else /* this line has no ip info (e.g. source line) */ + src->eip = 0; } pclose(file); out_assign: @@ -277,6 +274,9 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) goto out_unlock; for (line = syme->src->lines; line; line = line->next) { + /* skip lines without IP info */ + if (line->eip == 0) + continue; if (line->eip == ip) { line->count[counter]++; break; diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index af5805f51314..138e3cb2b727 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -222,3 +222,11 @@ u64 map__rip_2objdump(struct map *map, u64 rip) rip; return addr; } + +u64 map__objdump_2ip(struct map *map, u64 addr) +{ + u64 ip = map->dso->adjust_symbols ? + addr : + map->unmap_ip(map, addr); /* RIP -> IP */ + return ip; +} diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 9cee9c788dbf..86f77cb1d060 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -61,9 +61,9 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) } -/* rip -> addr suitable for passing to `objdump --start-address=` */ +/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ u64 map__rip_2objdump(struct map *map, u64 rip); - +u64 map__objdump_2ip(struct map *map, u64 addr); struct symbol; struct mmap_event; -- cgit v1.2.3-58-ga151 From 5f485364365f00853e5249cb3ae31f876936b552 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 7 Feb 2010 11:46:16 -0200 Subject: perf top: Use address pattern in lookup_sym_source MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because we may have aliases, like __GI___strcoll_l in /lib64/libc-2.10.2.so that appears in objdump as: $ objdump --start-address=0x0000003715a86420 \ --stop-address=0x0000003715a872dc -dS /lib64/libc-2.10.2.so 0000003715a86420 <__strcoll_l>: 3715a86420: 55 push %rbp 3715a86421: 48 89 e5 mov %rsp,%rbp 3715a86424: 41 57 push %r15 [root@doppio linux-2.6-tip]# So look for the address exactly at the start of the line instead so that annotation can work for in these cases. 
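Matching on the zero-padded start address, rather than on "<symbol>:", keeps annotation working when objdump prints an alias name such as __strcoll_l for __GI___strcoll_l. Below is a standalone sketch of building that fixed-width prefix and testing it against the start of a line; line_starts_symbol() is a hypothetical helper written only for this example and assumes a 64-bit build.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* one hex digit per 4 bits of a long, as in BITS_PER_LONG / 4 */
#define ADDR_WIDTH ((int)(sizeof(long) * 8 / 4))

static int line_starts_symbol(const char *line, uint64_t start)
{
	char pattern[ADDR_WIDTH + 3];
	int len = snprintf(pattern, sizeof(pattern), "%0*llx <",
			   ADDR_WIDTH, (unsigned long long)start);

	return strncmp(line, pattern, len) == 0;
}

int main(void)
{
	/* the aliased case quoted in the commit message */
	const char *line = "0000003715a86420 <__strcoll_l>:";

	printf("%d\n", line_starts_symbol(line, 0x3715a86420ULL));  /* 1 */
	return 0;
}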
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Kirill Smelkov Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1265550376-12665-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index befa57e2284d..c72ab50d65ca 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -202,10 +202,9 @@ static void parse_source(struct sym_entry *syme) len = sym->end - sym->start; sprintf(command, - "objdump --start-address=0x%016Lx " - "--stop-address=0x%016Lx -dS %s", - map__rip_2objdump(map, sym->start), - map__rip_2objdump(map, sym->end), path); + "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s", + BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start), + BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path); file = popen(command, "r"); if (!file) @@ -292,13 +291,15 @@ static void lookup_sym_source(struct sym_entry *syme) { struct symbol *symbol = sym_entry__symbol(syme); struct source_line *line; - char pattern[PATH_MAX]; + const size_t pattern_len = BITS_PER_LONG / 4 + 2; + char pattern[pattern_len + 1]; - sprintf(pattern, "<%s>:", symbol->name); + sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4, + map__rip_2objdump(syme->map, symbol->start)); pthread_mutex_lock(&syme->src->lock); for (line = syme->src->lines; line; line = line->next) { - if (strstr(line->line, pattern)) { + if (memcmp(line->line, pattern, pattern_len) == 0) { syme->src->source = line; break; } -- cgit v1.2.3-58-ga151 From 076dc4a65a6d99a16979e2c7917e669fb8c91ee5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 5 Feb 2010 12:16:47 -0500 Subject: x86/alternatives: Fix build warning Fixes these warnings: arch/x86/kernel/alternative.c: In function 'alternatives_text_reserved': arch/x86/kernel/alternative.c:402: warning: comparison of distinct pointer types lacks a cast arch/x86/kernel/alternative.c:402: warning: comparison of distinct pointer types lacks a cast arch/x86/kernel/alternative.c:405: warning: comparison of distinct pointer types lacks a cast arch/x86/kernel/alternative.c:405: warning: comparison of distinct pointer types lacks a cast Caused by: 2cfa197: ftrace/alternatives: Introducing *_text_reserved functions Changes in v2: - Use local variables to compare, instead of type casts. 
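The warning comes from using relational operators on pointers of different types: the list entries are u8 * while the function takes void * parameters. Assigning the parameters to typed locals, as the patch does, keeps the comparisons warning-free without scattering casts. A minimal illustration follows; struct region and overlaps() are hypothetical and only mimic the shape of alternatives_text_reserved().

/* Hypothetical reduction of the warning and its fix. */
struct region {
	unsigned char *text;
	unsigned char *text_end;
};

int overlaps(const struct region *r, void *start, void *end)
{
	/*
	 * Writing "r->text > end" directly would compare unsigned char *
	 * against void * and trigger "comparison of distinct pointer
	 * types lacks a cast"; typed locals avoid that.
	 */
	unsigned char *text_start = start;
	unsigned char *text_end = end;

	return !(r->text > text_end || r->text_end < text_start);
}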
Reported-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE LKML-Reference: <20100205171647.15750.37221.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 3c13284ff86d..e63b80e5861c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -395,12 +395,14 @@ int alternatives_text_reserved(void *start, void *end) { struct smp_alt_module *mod; u8 **ptr; + u8 *text_start = start; + u8 *text_end = end; list_for_each_entry(mod, &smp_alt_modules, next) { - if (mod->text > end || mod->text_end < start) + if (mod->text > text_end || mod->text_end < text_start) continue; for (ptr = mod->locks; ptr < mod->locks_end; ptr++) - if (start <= *ptr && end >= *ptr) + if (text_start <= *ptr && text_end >= *ptr) return 1; } -- cgit v1.2.3-58-ga151 From f7e7ee36757f68778700cde1aaed89e1d23e59fd Mon Sep 17 00:00:00 2001 From: "austin_zhang@linux.intel.com" Date: Fri, 5 Feb 2010 09:02:42 -0800 Subject: perf record: Fix existing process callgraph symbol When 'perf record -g' a existing process, even with debuginfo packages, still cannnot get symbol from 'perf report'. try: perf record -g -p `pidof xxx` -f perf report 68.26% :1181 b74870f2 [.] 0x000000b74870f2 | |--32.09%-- 0xb73b5b44 | 0xb7487102 | 0xb748a4e2 | 0xb748633d | 0xb73b41cd | 0xb73b4467 | 0xb747d531 The reason is: for existing process, in __cmd_record(), the pid is 0 rather than the existing process id. Signed-off-by: Austin Zhang Acked-by: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <4710.10.255.24.35.1265389362.squirrel@linux.intel.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3ad599b12c91..771533ced6a8 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -570,7 +570,7 @@ static int __cmd_record(int argc, const char **argv) } if (!system_wide && profile_cpu == -1) - event__synthesize_thread(pid, process_synthesized_event, + event__synthesize_thread(target_pid, process_synthesized_event, session); else event__synthesize_threads(process_synthesized_event, session); -- cgit v1.2.3-58-ga151 From 10fe12ef631a7e85022ed26304a37f033a6a95b8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 20 Feb 2010 19:53:13 -0200 Subject: perf symbols: Fix up map end too on modular kernels with no modules installed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In 2161db9 we stopped failing when not finding modules when asked too, but then the kernel maps (just one, for vmlinux) wasn't having its ->end field correctly set up, so symbols were not being found for the vmlinux map because its range was 0-0. 
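The paranoia check matters because map lookup is a simple range test: a kernel map synthesized with start == end == 0 can never contain an address, so no kernel symbol resolves at all. A toy sketch of that effect; struct toy_map and the exact comparison are simplified relative to the real map lookup.

#include <stdint.h>
#include <stdio.h>

struct toy_map { uint64_t start, end; };

static int map_contains(const struct toy_map *m, uint64_t addr)
{
	return addr >= m->start && addr < m->end;
}

int main(void)
{
	struct toy_map broken = { 0, 0 };      /* zero-sized MMAP event */
	struct toy_map fixed  = { 0, ~0ULL };  /* ->end forced to ~0UL  */
	uint64_t ip = 0xffffffff8100cafeULL;   /* some kernel text address */

	/* prints "0 1": with the 0-0 range nothing ever falls inside */
	printf("%d %d\n", map_contains(&broken, ip), map_contains(&fixed, ip));
	return 0;
}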
Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1266702793-29434-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.c | 6 ++++++ tools/perf/util/symbol.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index c3831f633dec..9eb7005bc6d6 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -379,6 +379,12 @@ int event__process_mmap(event_t *self, struct perf_session *session) session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start; session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; + /* + * Be a bit paranoid here, some perf.data file came with + * a zero sized synthesized MMAP event for the kernel. + */ + if (session->vmlinux_maps[MAP__FUNCTION]->end == 0) + session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL; perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name, self->mmap.pgoff); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 6882e9fec2d6..ee9c37efdd36 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1937,7 +1937,7 @@ int map_groups__create_kernel_maps(struct map_groups *self, return -1; if (symbol_conf.use_modules && map_groups__create_modules(self) < 0) - return 0; + pr_debug("Problems creating module maps, continuing anyway...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ -- cgit v1.2.3-58-ga151 From faa5c5c36ec50bf43e39c7798ce9701e6b002db3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 19 Feb 2010 23:02:07 -0200 Subject: perf tools: Don't use parent comm if not set at fork time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the parent comm then is worthless, confusing users about the thread where the sample really happened, leading to think that the sample happened in the parent, not where it really happened, in the children of a thread for which a PERF_RECORD_COMM event was not received. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1266627727-19715-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/thread.c | 18 ++++++++++++------ tools/perf/util/thread.h | 1 + 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 634b7f7140d5..9e8995eaf2b6 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -36,7 +36,10 @@ int thread__set_comm(struct thread *self, const char *comm) if (self->comm) free(self->comm); self->comm = strdup(comm); - return self->comm ? 
0 : -ENOMEM; + if (self->comm == NULL) + return -ENOMEM; + self->comm_set = true; + return 0; } int thread__comm_len(struct thread *self) @@ -255,11 +258,14 @@ int thread__fork(struct thread *self, struct thread *parent) { int i; - if (self->comm) - free(self->comm); - self->comm = strdup(parent->comm); - if (!self->comm) - return -ENOMEM; + if (parent->comm_set) { + if (self->comm) + free(self->comm); + self->comm = strdup(parent->comm); + if (!self->comm) + return -ENOMEM; + self->comm_set = true; + } for (i = 0; i < MAP__NR_TYPES; ++i) if (map_groups__clone(&self->mg, &parent->mg, i) < 0) diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 56f317b8a06c..0a28f39de545 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -15,6 +15,7 @@ struct thread { struct map_groups mg; pid_t pid; char shortname[3]; + bool comm_set; char *comm; int comm_len; }; -- cgit v1.2.3-58-ga151 From f526d68b6ce9ba7a2bd94e663e240a022524c58a Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:52 -0600 Subject: perf/scripts: Fix supported language listing option 'perf trace -s list' prints a list of the supported scripting languages. One problem with it is that it falls through and prints the trace as well. The use of 'list' for this also makes it easy to confuse with 'perf trace -l', used for listing available scripts. So change 'perf trace -s list' to 'perf trace -s lang' and fixes the fall-through problem. Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Documentation/perf-trace.txt | 4 +++- tools/perf/builtin-trace.c | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index 60e5900da483..c00a76fcb8d6 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt @@ -45,9 +45,11 @@ OPTIONS --list=:: Display a list of available trace scripts. --s:: +-s ['lang']:: --script=:: Process trace data with the given script ([lang]:script[.ext]). + If the string 'lang' is specified in place of a script name, a + list of supported languages will be displayed instead. -g:: --gen-script=:: diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 0b65779e3c10..d5d20c34e221 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -219,9 +219,9 @@ static int parse_scriptname(const struct option *opt __used, const char *script, *ext; int len; - if (strcmp(str, "list") == 0) { + if (strcmp(str, "lang") == 0) { list_available_languages(); - return 0; + exit(0); } script = strchr(str, ':'); -- cgit v1.2.3-58-ga151 From e26207a3819684e9b4450a2d30bdd065fa92d9c7 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:53 -0600 Subject: perf/scripts: Fix bug in Util.pm Fix bogus calculation. 
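The Util.pm fix replaces a subtraction of the whole-second count with a proper reduction modulo NSECS_PER_SEC. Restated in C for clarity (the affected tooling is Perl, so this is only an arithmetic illustration, assuming nsecs_secs() returns the integer number of seconds):

#include <stdio.h>
#include <stdint.h>

#define NSECS_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t nsecs = 5123456789ULL;           /* 5.123456789 s */
	uint64_t secs  = nsecs / NSECS_PER_SEC;   /* 5 */
	uint64_t rem   = nsecs % NSECS_PER_SEC;   /* 123456789 */

	/* The buggy version computed nsecs - secs, i.e. 5123456784,
	 * subtracting the second count itself rather than reducing
	 * modulo NSECS_PER_SEC. */
	printf("%llu.%09llu\n",
	       (unsigned long long)secs, (unsigned long long)rem);
	return 0;
}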
Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm index 052f132ced24..f869c48dc9b0 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm @@ -44,7 +44,7 @@ sub nsecs_secs { sub nsecs_nsecs { my ($nsecs) = @_; - return $nsecs - nsecs_secs($nsecs); + return $nsecs % $NSECS_PER_SEC; } sub nsecs_str { -- cgit v1.2.3-58-ga151 From 7397d80ddde8eef3b1dce6c29e0c53bd322ef824 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:54 -0600 Subject: perf/scripts: Move common code out of Perl-specific files This stuff is needed by all scripting engines; move it from the Perl engine source to a more common place. Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-4-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 5 ++-- tools/perf/scripts/perl/Perf-Trace-Util/Context.xs | 3 ++- tools/perf/util/trace-event-parse.c | 15 ++++++++++++ tools/perf/util/trace-event-perl.c | 27 ---------------------- tools/perf/util/trace-event-perl.h | 8 ------- tools/perf/util/trace-event.h | 9 +++++++- 6 files changed, 28 insertions(+), 39 deletions(-) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c index af78d9a52a7d..01a64ad693f2 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c @@ -31,13 +31,14 @@ #include "EXTERN.h" #include "perl.h" #include "XSUB.h" -#include "../../../util/trace-event-perl.h" +#include "../../../perf.h" +#include "../../../util/trace-event.h" #ifndef PERL_UNUSED_VAR # define PERL_UNUSED_VAR(var) if (0) var = var #endif -#line 41 "Context.c" +#line 42 "Context.c" XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_pc) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs index fb78006c165e..549cf0467d30 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs @@ -22,7 +22,8 @@ #include "EXTERN.h" #include "perl.h" #include "XSUB.h" -#include "../../../util/trace-event-perl.h" +#include "../../../perf.h" +#include "../../../util/trace-event.h" MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context PROTOTYPES: ENABLE diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index c4b3cb8a02b1..9b3c20f42f98 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -3286,3 +3286,18 @@ void parse_set_info(int nr_cpus, int long_sz) cpus = nr_cpus; long_size = long_sz; } + +int common_pc(struct scripting_context *context) +{ + return parse_common_pc(context->event_data); +} + +int common_flags(struct scripting_context *context) +{ + return 
parse_common_flags(context->event_data); +} + +int common_lock_depth(struct scripting_context *context) +{ + return parse_common_lock_depth(context->event_data); +} diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c index 6d6d76b8a21e..5b49df067df0 100644 --- a/tools/perf/util/trace-event-perl.c +++ b/tools/perf/util/trace-event-perl.c @@ -239,33 +239,6 @@ static inline struct event *find_cache_event(int type) return event; } -int common_pc(struct scripting_context *context) -{ - int pc; - - pc = parse_common_pc(context->event_data); - - return pc; -} - -int common_flags(struct scripting_context *context) -{ - int flags; - - flags = parse_common_flags(context->event_data); - - return flags; -} - -int common_lock_depth(struct scripting_context *context) -{ - int lock_depth; - - lock_depth = parse_common_lock_depth(context->event_data); - - return lock_depth; -} - static void perl_process_event(int cpu, void *data, int size __unused, unsigned long long nsecs, char *comm) diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h index e88fb26137bb..01efcc9564fb 100644 --- a/tools/perf/util/trace-event-perl.h +++ b/tools/perf/util/trace-event-perl.h @@ -44,12 +44,4 @@ void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; #endif -struct scripting_context { - void *event_data; -}; - -int common_pc(struct scripting_context *context); -int common_flags(struct scripting_context *context); -int common_lock_depth(struct scripting_context *context); - #endif /* __PERF_TRACE_EVENT_PERL_H */ diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 6ad405620c9b..aaf2da2d21e5 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -279,7 +279,14 @@ struct scripting_ops { int script_spec_register(const char *spec, struct scripting_ops *ops); -extern struct scripting_ops perl_scripting_ops; void setup_perl_scripting(void); +struct scripting_context { + void *event_data; +}; + +int common_pc(struct scripting_context *context); +int common_flags(struct scripting_context *context); +int common_lock_depth(struct scripting_context *context); + #endif /* __PERF_TRACE_EVENTS_H */ -- cgit v1.2.3-58-ga151 From 82d156cd5e817055c63ec50247a425c195f4cb14 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:55 -0600 Subject: perf/scripts: Move Perl scripting files to scripting-engines dir Create a scripting-engines directory to contain scripting engine implementation code, in anticipation of the addition of new scripting support. Also removes trace-event-perl.h. 
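With the engine code isolated under util/scripting-engines/, adding another language comes down to filling in the struct scripting_ops table from trace-event.h and registering the script specs it claims via script_spec_register(), exactly as register_perl_scripting() does in the moved file. The following skeleton of a hypothetical engine is illustrative only (the "noop" names do not exist in perf, and the includes assume the perf tree):

#include "util.h"         /* die(), __unused */
#include "trace-event.h"  /* struct scripting_ops, script_spec_register() */

static int noop_start_script(const char *script __unused, int argc __unused,
			     const char **argv __unused)
{
	return 0;
}

static int noop_stop_script(void)
{
	return 0;
}

static void noop_process_event(int cpu __unused, void *data __unused,
			       int size __unused,
			       unsigned long long nsecs __unused,
			       char *comm __unused)
{
}

static int noop_generate_script(const char *outfile __unused)
{
	return 0;
}

static struct scripting_ops noop_scripting_ops = {
	.name            = "Noop",
	.start_script    = noop_start_script,
	.stop_script     = noop_stop_script,
	.process_event   = noop_process_event,
	.generate_script = noop_generate_script,
};

void setup_noop_scripting(void)
{
	/* after this, scripts registered under the "noop" spec reach this engine */
	if (script_spec_register("noop", &noop_scripting_ops))
		die("error registering noop script extension");
}

Registering both a language name and a file extension, as the Perl engine does with "Perl" and "pl", lets 'perf trace -s' select the engine either from an explicit lang: prefix or from the script suffix, per the ([lang]:script[.ext]) syntax documented in perf-trace.txt.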
Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-5-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Makefile | 9 +- .../perf/util/scripting-engines/trace-event-perl.c | 568 ++++++++++++++++++ tools/perf/util/trace-event-perl.c | 634 --------------------- tools/perf/util/trace-event-perl.h | 47 -- tools/perf/util/trace-event-scripting.c | 106 ++++ 5 files changed, 679 insertions(+), 685 deletions(-) create mode 100644 tools/perf/util/scripting-engines/trace-event-perl.c delete mode 100644 tools/perf/util/trace-event-perl.c delete mode 100644 tools/perf/util/trace-event-perl.h create mode 100644 tools/perf/util/trace-event-scripting.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3a5fb36ccc97..0a3c0c8b3fc0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -385,7 +385,6 @@ LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h LIB_H += util/trace-event.h -LIB_H += util/trace-event-perl.h LIB_H += util/probe-finder.h LIB_H += util/probe-event.h @@ -428,7 +427,7 @@ LIB_OBJS += util/thread.o LIB_OBJS += util/trace-event-parse.o LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o -LIB_OBJS += util/trace-event-perl.o +LIB_OBJS += util/trace-event-scripting.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o @@ -519,6 +518,7 @@ ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; e BASIC_CFLAGS += -DNO_LIBPERL else ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) + LIB_OBJS += util/scripting-engines/trace-event-perl.o LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o endif @@ -893,8 +893,8 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< -util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< +util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< @@ -1012,6 +1012,7 @@ install: all $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' + ifdef BUILT_INS $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c new file mode 100644 index 000000000000..5376378e0cfc --- /dev/null +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -0,0 +1,568 @@ +/* + * trace-event-perl. 
Feed perf trace events to an embedded Perl interpreter. + * + * Copyright (C) 2009 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include + +#include "../../perf.h" +#include "../util.h" +#include "../trace-event.h" + +#include +#include + +void boot_Perf__Trace__Context(pTHX_ CV *cv); +void boot_DynaLoader(pTHX_ CV *cv); +typedef PerlInterpreter * INTERP; + +void xs_init(pTHX); + +void xs_init(pTHX) +{ + const char *file = __FILE__; + dXSUB_SYS; + + newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, + file); + newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); +} + +INTERP my_perl; + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(unsigned short) * 8)) - 1) + +struct event *events[FTRACE_MAX_EVENT]; + +extern struct scripting_context *scripting_context; + +static char *cur_field_name; +static int zero_flag_atom; + +static void define_symbolic_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_value", 0)) + call_pv("main::define_symbolic_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_symbolic_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_symbolic_value(ev_name, field_name, field->value, field->str); + if (field->next) + define_symbolic_values(field->next, ev_name, field_name); +} + +static void define_symbolic_field(const char *ev_name, + const char *field_name) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_field", 0)) + call_pv("main::define_symbolic_field", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_flag_value", 0)) + call_pv("main::define_flag_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_flag_value(ev_name, 
field_name, field->value, field->str); + if (field->next) + define_flag_values(field->next, ev_name, field_name); +} + +static void define_flag_field(const char *ev_name, + const char *field_name, + const char *delim) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVpv(delim, 0))); + + PUTBACK; + if (get_cv("main::define_flag_field", 0)) + call_pv("main::define_flag_field", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_event_symbols(struct event *event, + const char *ev_name, + struct print_arg *args) +{ + switch (args->type) { + case PRINT_NULL: + break; + case PRINT_ATOM: + define_flag_value(ev_name, cur_field_name, "0", + args->atom.atom); + zero_flag_atom = 0; + break; + case PRINT_FIELD: + if (cur_field_name) + free(cur_field_name); + cur_field_name = strdup(args->field.name); + break; + case PRINT_FLAGS: + define_event_symbols(event, ev_name, args->flags.field); + define_flag_field(ev_name, cur_field_name, args->flags.delim); + define_flag_values(args->flags.flags, ev_name, cur_field_name); + break; + case PRINT_SYMBOL: + define_event_symbols(event, ev_name, args->symbol.field); + define_symbolic_field(ev_name, cur_field_name); + define_symbolic_values(args->symbol.symbols, ev_name, + cur_field_name); + break; + case PRINT_STRING: + break; + case PRINT_TYPE: + define_event_symbols(event, ev_name, args->typecast.item); + break; + case PRINT_OP: + if (strcmp(args->op.op, ":") == 0) + zero_flag_atom = 1; + define_event_symbols(event, ev_name, args->op.left); + define_event_symbols(event, ev_name, args->op.right); + break; + default: + /* we should warn... */ + return; + } + + if (args->next) + define_event_symbols(event, ev_name, args->next); +} + +static inline struct event *find_cache_event(int type) +{ + static char ev_name[256]; + struct event *event; + + if (events[type]) + return events[type]; + + events[type] = event = trace_find_event(type); + if (!event) + return NULL; + + sprintf(ev_name, "%s::%s", event->system, event->name); + + define_event_symbols(event, ev_name, event->print_fmt.args); + + return event; +} + +static void perl_process_event(int cpu, void *data, + int size __unused, + unsigned long long nsecs, char *comm) +{ + struct format_field *field; + static char handler[256]; + unsigned long long val; + unsigned long s, ns; + struct event *event; + int type; + int pid; + + dSP; + + type = trace_parse_common_type(data); + + event = find_cache_event(type); + if (!event) + die("ug! 
no event found for type %d", type); + + pid = trace_parse_common_pid(data); + + sprintf(handler, "%s::%s", event->system, event->name); + + s = nsecs / NSECS_PER_SEC; + ns = nsecs - s * NSECS_PER_SEC; + + scripting_context->event_data = data; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(s))); + XPUSHs(sv_2mortal(newSVuv(ns))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + + /* common fields other than pid can be accessed via xsub fns */ + + for (field = event->format.fields; field; field = field->next) { + if (field->flags & FIELD_IS_STRING) { + int offset; + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(data + field->offset); + offset &= 0xffff; + } else + offset = field->offset; + XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); + } else { /* FIELD_IS_NUMERIC */ + val = read_size(data + field->offset, field->size); + if (field->flags & FIELD_IS_SIGNED) { + XPUSHs(sv_2mortal(newSViv(val))); + } else { + XPUSHs(sv_2mortal(newSVuv(val))); + } + } + } + + PUTBACK; + + if (get_cv(handler, 0)) + call_pv(handler, G_SCALAR); + else if (get_cv("main::trace_unhandled", 0)) { + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(nsecs))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + call_pv("main::trace_unhandled", G_SCALAR); + } + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void run_start_sub(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_begin", 0)) + call_pv("main::trace_begin", G_DISCARD | G_NOARGS); +} + +/* + * Start trace script + */ +static int perl_start_script(const char *script, int argc, const char **argv) +{ + const char **command_line; + int i, err = 0; + + command_line = malloc((argc + 2) * sizeof(const char *)); + command_line[0] = ""; + command_line[1] = script; + for (i = 2; i < argc + 2; i++) + command_line[i] = argv[i - 2]; + + my_perl = perl_alloc(); + perl_construct(my_perl); + + if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, + (char **)NULL)) { + err = -1; + goto error; + } + + if (perl_run(my_perl)) { + err = -1; + goto error; + } + + if (SvTRUE(ERRSV)) { + err = -1; + goto error; + } + + run_start_sub(); + + free(command_line); + fprintf(stderr, "perf trace started with Perl script %s\n\n", script); + return 0; +error: + perl_free(my_perl); + free(command_line); + + return err; +} + +/* + * Stop trace script + */ +static int perl_stop_script(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_end", 0)) + call_pv("main::trace_end", G_DISCARD | G_NOARGS); + + perl_destruct(my_perl); + perl_free(my_perl); + + fprintf(stderr, "\nperf trace Perl script stopped\n"); + + return 0; +} + +static int perl_generate_script(const char *outfile) +{ + struct event *event = NULL; + struct format_field *f; + char fname[PATH_MAX]; + int not_first, count; + FILE *ofp; + + sprintf(fname, "%s.pl", outfile); + ofp = fopen(fname, "w"); + if (ofp == NULL) { + fprintf(stderr, "couldn't open %s\n", fname); + return -1; + } + + fprintf(ofp, "# perf trace event handlers, " + "generated by perf trace -g perl\n"); + + fprintf(ofp, "# Licensed under the terms of the GNU GPL" + " License version 2\n\n"); + + fprintf(ofp, "# The common_* event handler 
fields are the most useful " + "fields common to\n"); + + fprintf(ofp, "# all events. They don't necessarily correspond to " + "the 'common_*' fields\n"); + + fprintf(ofp, "# in the format files. Those fields not available as " + "handler params can\n"); + + fprintf(ofp, "# be retrieved using Perl functions of the form " + "common_*($context).\n"); + + fprintf(ofp, "# See Context.pm for the list of available " + "functions.\n\n"); + + fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" + "Perf-Trace-Util/lib\";\n"); + + fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); + fprintf(ofp, "use Perf::Trace::Core;\n"); + fprintf(ofp, "use Perf::Trace::Context;\n"); + fprintf(ofp, "use Perf::Trace::Util;\n\n"); + + fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); + fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); + + while ((event = trace_find_next_event(event))) { + fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); + fprintf(ofp, "\tmy ("); + + fprintf(ofp, "$event_name, "); + fprintf(ofp, "$context, "); + fprintf(ofp, "$common_cpu, "); + fprintf(ofp, "$common_secs, "); + fprintf(ofp, "$common_nsecs,\n"); + fprintf(ofp, "\t $common_pid, "); + fprintf(ofp, "$common_comm,\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (++count % 5 == 0) + fprintf(ofp, "\n\t "); + + fprintf(ofp, "$%s", f->name); + } + fprintf(ofp, ") = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm);\n\n"); + + fprintf(ofp, "\tprintf(\""); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (count && count % 4 == 0) { + fprintf(ofp, "\".\n\t \""); + } + count++; + + fprintf(ofp, "%s=", f->name); + if (f->flags & FIELD_IS_STRING || + f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_SYMBOLIC) + fprintf(ofp, "%%s"); + else if (f->flags & FIELD_IS_SIGNED) + fprintf(ofp, "%%d"); + else + fprintf(ofp, "%%u"); + } + + fprintf(ofp, "\\n\",\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + + if (++count % 5 == 0) + fprintf(ofp, "\n\t "); + + if (f->flags & FIELD_IS_FLAG) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "flag_str(\""); + fprintf(ofp, "%s::%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else if (f->flags & FIELD_IS_SYMBOLIC) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "symbol_str(\""); + fprintf(ofp, "%s::%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else + fprintf(ofp, "$%s", f->name); + } + + fprintf(ofp, ");\n"); + fprintf(ofp, "}\n\n"); + } + + fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " + "$common_cpu, $common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm) = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t $common_pid, " + "$common_comm);\n}\n\n"); + + fprintf(ofp, "sub print_header\n{\n" + "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" + "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " + "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); + + fclose(ofp); + + fprintf(stderr, "generated Perl script: %s\n", fname); + + return 0; +} + 
+struct scripting_ops perl_scripting_ops = { + .name = "Perl", + .start_script = perl_start_script, + .stop_script = perl_stop_script, + .process_event = perl_process_event, + .generate_script = perl_generate_script, +}; diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c deleted file mode 100644 index 5b49df067df0..000000000000 --- a/tools/perf/util/trace-event-perl.c +++ /dev/null @@ -1,634 +0,0 @@ -/* - * trace-event-perl. Feed perf trace events to an embedded Perl interpreter. - * - * Copyright (C) 2009 Tom Zanussi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include -#include - -#include "../perf.h" -#include "util.h" -#include "trace-event.h" -#include "trace-event-perl.h" - -void xs_init(pTHX); - -void xs_init(pTHX) -{ - const char *file = __FILE__; - dXSUB_SYS; - - newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, - file); - newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); -} - -INTERP my_perl; - -#define FTRACE_MAX_EVENT \ - ((1 << (sizeof(unsigned short) * 8)) - 1) - -struct event *events[FTRACE_MAX_EVENT]; - -static struct scripting_context *scripting_context; - -static char *cur_field_name; -static int zero_flag_atom; - -static void define_symbolic_value(const char *ev_name, - const char *field_name, - const char *field_value, - const char *field_str) -{ - unsigned long long value; - dSP; - - value = eval_flag(field_value); - - ENTER; - SAVETMPS; - PUSHMARK(SP); - - XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); - XPUSHs(sv_2mortal(newSVpv(field_name, 0))); - XPUSHs(sv_2mortal(newSVuv(value))); - XPUSHs(sv_2mortal(newSVpv(field_str, 0))); - - PUTBACK; - if (get_cv("main::define_symbolic_value", 0)) - call_pv("main::define_symbolic_value", G_SCALAR); - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void define_symbolic_values(struct print_flag_sym *field, - const char *ev_name, - const char *field_name) -{ - define_symbolic_value(ev_name, field_name, field->value, field->str); - if (field->next) - define_symbolic_values(field->next, ev_name, field_name); -} - -static void define_symbolic_field(const char *ev_name, - const char *field_name) -{ - dSP; - - ENTER; - SAVETMPS; - PUSHMARK(SP); - - XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); - XPUSHs(sv_2mortal(newSVpv(field_name, 0))); - - PUTBACK; - if (get_cv("main::define_symbolic_field", 0)) - call_pv("main::define_symbolic_field", G_SCALAR); - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void define_flag_value(const char *ev_name, - const char *field_name, - const char *field_value, - const char *field_str) -{ - unsigned long long value; - dSP; - - value = eval_flag(field_value); - - ENTER; - SAVETMPS; - PUSHMARK(SP); - - XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); - XPUSHs(sv_2mortal(newSVpv(field_name, 0))); - XPUSHs(sv_2mortal(newSVuv(value))); - 
XPUSHs(sv_2mortal(newSVpv(field_str, 0))); - - PUTBACK; - if (get_cv("main::define_flag_value", 0)) - call_pv("main::define_flag_value", G_SCALAR); - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void define_flag_values(struct print_flag_sym *field, - const char *ev_name, - const char *field_name) -{ - define_flag_value(ev_name, field_name, field->value, field->str); - if (field->next) - define_flag_values(field->next, ev_name, field_name); -} - -static void define_flag_field(const char *ev_name, - const char *field_name, - const char *delim) -{ - dSP; - - ENTER; - SAVETMPS; - PUSHMARK(SP); - - XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); - XPUSHs(sv_2mortal(newSVpv(field_name, 0))); - XPUSHs(sv_2mortal(newSVpv(delim, 0))); - - PUTBACK; - if (get_cv("main::define_flag_field", 0)) - call_pv("main::define_flag_field", G_SCALAR); - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void define_event_symbols(struct event *event, - const char *ev_name, - struct print_arg *args) -{ - switch (args->type) { - case PRINT_NULL: - break; - case PRINT_ATOM: - define_flag_value(ev_name, cur_field_name, "0", - args->atom.atom); - zero_flag_atom = 0; - break; - case PRINT_FIELD: - if (cur_field_name) - free(cur_field_name); - cur_field_name = strdup(args->field.name); - break; - case PRINT_FLAGS: - define_event_symbols(event, ev_name, args->flags.field); - define_flag_field(ev_name, cur_field_name, args->flags.delim); - define_flag_values(args->flags.flags, ev_name, cur_field_name); - break; - case PRINT_SYMBOL: - define_event_symbols(event, ev_name, args->symbol.field); - define_symbolic_field(ev_name, cur_field_name); - define_symbolic_values(args->symbol.symbols, ev_name, - cur_field_name); - break; - case PRINT_STRING: - break; - case PRINT_TYPE: - define_event_symbols(event, ev_name, args->typecast.item); - break; - case PRINT_OP: - if (strcmp(args->op.op, ":") == 0) - zero_flag_atom = 1; - define_event_symbols(event, ev_name, args->op.left); - define_event_symbols(event, ev_name, args->op.right); - break; - default: - /* we should warn... */ - return; - } - - if (args->next) - define_event_symbols(event, ev_name, args->next); -} - -static inline struct event *find_cache_event(int type) -{ - static char ev_name[256]; - struct event *event; - - if (events[type]) - return events[type]; - - events[type] = event = trace_find_event(type); - if (!event) - return NULL; - - sprintf(ev_name, "%s::%s", event->system, event->name); - - define_event_symbols(event, ev_name, event->print_fmt.args); - - return event; -} - -static void perl_process_event(int cpu, void *data, - int size __unused, - unsigned long long nsecs, char *comm) -{ - struct format_field *field; - static char handler[256]; - unsigned long long val; - unsigned long s, ns; - struct event *event; - int type; - int pid; - - dSP; - - type = trace_parse_common_type(data); - - event = find_cache_event(type); - if (!event) - die("ug! 
no event found for type %d", type); - - pid = trace_parse_common_pid(data); - - sprintf(handler, "%s::%s", event->system, event->name); - - s = nsecs / NSECS_PER_SEC; - ns = nsecs - s * NSECS_PER_SEC; - - scripting_context->event_data = data; - - ENTER; - SAVETMPS; - PUSHMARK(SP); - - XPUSHs(sv_2mortal(newSVpv(handler, 0))); - XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); - XPUSHs(sv_2mortal(newSVuv(cpu))); - XPUSHs(sv_2mortal(newSVuv(s))); - XPUSHs(sv_2mortal(newSVuv(ns))); - XPUSHs(sv_2mortal(newSViv(pid))); - XPUSHs(sv_2mortal(newSVpv(comm, 0))); - - /* common fields other than pid can be accessed via xsub fns */ - - for (field = event->format.fields; field; field = field->next) { - if (field->flags & FIELD_IS_STRING) { - int offset; - if (field->flags & FIELD_IS_DYNAMIC) { - offset = *(int *)(data + field->offset); - offset &= 0xffff; - } else - offset = field->offset; - XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); - } else { /* FIELD_IS_NUMERIC */ - val = read_size(data + field->offset, field->size); - if (field->flags & FIELD_IS_SIGNED) { - XPUSHs(sv_2mortal(newSViv(val))); - } else { - XPUSHs(sv_2mortal(newSVuv(val))); - } - } - } - - PUTBACK; - - if (get_cv(handler, 0)) - call_pv(handler, G_SCALAR); - else if (get_cv("main::trace_unhandled", 0)) { - XPUSHs(sv_2mortal(newSVpv(handler, 0))); - XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); - XPUSHs(sv_2mortal(newSVuv(cpu))); - XPUSHs(sv_2mortal(newSVuv(nsecs))); - XPUSHs(sv_2mortal(newSViv(pid))); - XPUSHs(sv_2mortal(newSVpv(comm, 0))); - call_pv("main::trace_unhandled", G_SCALAR); - } - SPAGAIN; - PUTBACK; - FREETMPS; - LEAVE; -} - -static void run_start_sub(void) -{ - dSP; /* access to Perl stack */ - PUSHMARK(SP); - - if (get_cv("main::trace_begin", 0)) - call_pv("main::trace_begin", G_DISCARD | G_NOARGS); -} - -/* - * Start trace script - */ -static int perl_start_script(const char *script, int argc, const char **argv) -{ - const char **command_line; - int i, err = 0; - - command_line = malloc((argc + 2) * sizeof(const char *)); - command_line[0] = ""; - command_line[1] = script; - for (i = 2; i < argc + 2; i++) - command_line[i] = argv[i - 2]; - - my_perl = perl_alloc(); - perl_construct(my_perl); - - if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, - (char **)NULL)) { - err = -1; - goto error; - } - - if (perl_run(my_perl)) { - err = -1; - goto error; - } - - if (SvTRUE(ERRSV)) { - err = -1; - goto error; - } - - run_start_sub(); - - free(command_line); - fprintf(stderr, "perf trace started with Perl script %s\n\n", script); - return 0; -error: - perl_free(my_perl); - free(command_line); - - return err; -} - -/* - * Stop trace script - */ -static int perl_stop_script(void) -{ - dSP; /* access to Perl stack */ - PUSHMARK(SP); - - if (get_cv("main::trace_end", 0)) - call_pv("main::trace_end", G_DISCARD | G_NOARGS); - - perl_destruct(my_perl); - perl_free(my_perl); - - fprintf(stderr, "\nperf trace Perl script stopped\n"); - - return 0; -} - -static int perl_generate_script(const char *outfile) -{ - struct event *event = NULL; - struct format_field *f; - char fname[PATH_MAX]; - int not_first, count; - FILE *ofp; - - sprintf(fname, "%s.pl", outfile); - ofp = fopen(fname, "w"); - if (ofp == NULL) { - fprintf(stderr, "couldn't open %s\n", fname); - return -1; - } - - fprintf(ofp, "# perf trace event handlers, " - "generated by perf trace -g perl\n"); - - fprintf(ofp, "# Licensed under the terms of the GNU GPL" - " License version 2\n\n"); - - fprintf(ofp, "# The common_* event handler 
fields are the most useful " - "fields common to\n"); - - fprintf(ofp, "# all events. They don't necessarily correspond to " - "the 'common_*' fields\n"); - - fprintf(ofp, "# in the format files. Those fields not available as " - "handler params can\n"); - - fprintf(ofp, "# be retrieved using Perl functions of the form " - "common_*($context).\n"); - - fprintf(ofp, "# See Context.pm for the list of available " - "functions.\n\n"); - - fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" - "Perf-Trace-Util/lib\";\n"); - - fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); - fprintf(ofp, "use Perf::Trace::Core;\n"); - fprintf(ofp, "use Perf::Trace::Context;\n"); - fprintf(ofp, "use Perf::Trace::Util;\n\n"); - - fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); - fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); - - while ((event = trace_find_next_event(event))) { - fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); - fprintf(ofp, "\tmy ("); - - fprintf(ofp, "$event_name, "); - fprintf(ofp, "$context, "); - fprintf(ofp, "$common_cpu, "); - fprintf(ofp, "$common_secs, "); - fprintf(ofp, "$common_nsecs,\n"); - fprintf(ofp, "\t $common_pid, "); - fprintf(ofp, "$common_comm,\n\t "); - - not_first = 0; - count = 0; - - for (f = event->format.fields; f; f = f->next) { - if (not_first++) - fprintf(ofp, ", "); - if (++count % 5 == 0) - fprintf(ofp, "\n\t "); - - fprintf(ofp, "$%s", f->name); - } - fprintf(ofp, ") = @_;\n\n"); - - fprintf(ofp, "\tprint_header($event_name, $common_cpu, " - "$common_secs, $common_nsecs,\n\t " - "$common_pid, $common_comm);\n\n"); - - fprintf(ofp, "\tprintf(\""); - - not_first = 0; - count = 0; - - for (f = event->format.fields; f; f = f->next) { - if (not_first++) - fprintf(ofp, ", "); - if (count && count % 4 == 0) { - fprintf(ofp, "\".\n\t \""); - } - count++; - - fprintf(ofp, "%s=", f->name); - if (f->flags & FIELD_IS_STRING || - f->flags & FIELD_IS_FLAG || - f->flags & FIELD_IS_SYMBOLIC) - fprintf(ofp, "%%s"); - else if (f->flags & FIELD_IS_SIGNED) - fprintf(ofp, "%%d"); - else - fprintf(ofp, "%%u"); - } - - fprintf(ofp, "\\n\",\n\t "); - - not_first = 0; - count = 0; - - for (f = event->format.fields; f; f = f->next) { - if (not_first++) - fprintf(ofp, ", "); - - if (++count % 5 == 0) - fprintf(ofp, "\n\t "); - - if (f->flags & FIELD_IS_FLAG) { - if ((count - 1) % 5 != 0) { - fprintf(ofp, "\n\t "); - count = 4; - } - fprintf(ofp, "flag_str(\""); - fprintf(ofp, "%s::%s\", ", event->system, - event->name); - fprintf(ofp, "\"%s\", $%s)", f->name, - f->name); - } else if (f->flags & FIELD_IS_SYMBOLIC) { - if ((count - 1) % 5 != 0) { - fprintf(ofp, "\n\t "); - count = 4; - } - fprintf(ofp, "symbol_str(\""); - fprintf(ofp, "%s::%s\", ", event->system, - event->name); - fprintf(ofp, "\"%s\", $%s)", f->name, - f->name); - } else - fprintf(ofp, "$%s", f->name); - } - - fprintf(ofp, ");\n"); - fprintf(ofp, "}\n\n"); - } - - fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " - "$common_cpu, $common_secs, $common_nsecs,\n\t " - "$common_pid, $common_comm) = @_;\n\n"); - - fprintf(ofp, "\tprint_header($event_name, $common_cpu, " - "$common_secs, $common_nsecs,\n\t $common_pid, " - "$common_comm);\n}\n\n"); - - fprintf(ofp, "sub print_header\n{\n" - "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" - "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " - "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); - - fclose(ofp); - - fprintf(stderr, "generated Perl script: %s\n", fname); - - return 0; -} - 
-struct scripting_ops perl_scripting_ops = { - .name = "Perl", - .start_script = perl_start_script, - .stop_script = perl_stop_script, - .process_event = perl_process_event, - .generate_script = perl_generate_script, -}; - -static void print_unsupported_msg(void) -{ - fprintf(stderr, "Perl scripting not supported." - " Install libperl and rebuild perf to enable it.\n" - "For example:\n # apt-get install libperl-dev (ubuntu)" - "\n # yum install perl-ExtUtils-Embed (Fedora)" - "\n etc.\n"); -} - -static int perl_start_script_unsupported(const char *script __unused, - int argc __unused, - const char **argv __unused) -{ - print_unsupported_msg(); - - return -1; -} - -static int perl_stop_script_unsupported(void) -{ - return 0; -} - -static void perl_process_event_unsupported(int cpu __unused, - void *data __unused, - int size __unused, - unsigned long long nsecs __unused, - char *comm __unused) -{ -} - -static int perl_generate_script_unsupported(const char *outfile __unused) -{ - print_unsupported_msg(); - - return -1; -} - -struct scripting_ops perl_scripting_unsupported_ops = { - .name = "Perl", - .start_script = perl_start_script_unsupported, - .stop_script = perl_stop_script_unsupported, - .process_event = perl_process_event_unsupported, - .generate_script = perl_generate_script_unsupported, -}; - -static void register_perl_scripting(struct scripting_ops *scripting_ops) -{ - int err; - err = script_spec_register("Perl", scripting_ops); - if (err) - die("error registering Perl script extension"); - - err = script_spec_register("pl", scripting_ops); - if (err) - die("error registering pl script extension"); - - scripting_context = malloc(sizeof(struct scripting_context)); -} - -#ifdef NO_LIBPERL -void setup_perl_scripting(void) -{ - register_perl_scripting(&perl_scripting_unsupported_ops); -} -#else -void setup_perl_scripting(void) -{ - register_perl_scripting(&perl_scripting_ops); -} -#endif diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h deleted file mode 100644 index 01efcc9564fb..000000000000 --- a/tools/perf/util/trace-event-perl.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef __PERF_TRACE_EVENT_PERL_H -#define __PERF_TRACE_EVENT_PERL_H -#ifdef NO_LIBPERL -typedef int INTERP; -#define dSP -#define ENTER -#define SAVETMPS -#define PUTBACK -#define SPAGAIN -#define FREETMPS -#define LEAVE -#define SP -#define ERRSV -#define G_SCALAR (0) -#define G_DISCARD (0) -#define G_NOARGS (0) -#define PUSHMARK(a) -#define SvTRUE(a) (0) -#define XPUSHs(s) -#define sv_2mortal(a) -#define newSVpv(a,b) -#define newSVuv(a) -#define newSViv(a) -#define get_cv(a,b) (0) -#define call_pv(a,b) (0) -#define perl_alloc() (0) -#define perl_construct(a) (0) -#define perl_parse(a,b,c,d,e) (0) -#define perl_run(a) (0) -#define perl_destruct(a) (0) -#define perl_free(a) (0) -#define pTHX void -#define CV void -#define dXSUB_SYS -#define pTHX_ -static inline void newXS(const char *a, void *b, const char *c) {} -static void boot_Perf__Trace__Context(pTHX_ CV *cv) {} -static void boot_DynaLoader(pTHX_ CV *cv) {} -#else -#include -#include -void boot_Perf__Trace__Context(pTHX_ CV *cv); -void boot_DynaLoader(pTHX_ CV *cv); -typedef PerlInterpreter * INTERP; -#endif - -#endif /* __PERF_TRACE_EVENT_PERL_H */ diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c new file mode 100644 index 000000000000..9e371965c034 --- /dev/null +++ b/tools/perf/util/trace-event-scripting.c @@ -0,0 +1,106 @@ +/* + * trace-event-scripting. 
Scripting engine common and initialization code. + * + * Copyright (C) 2009-2010 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "trace-event.h" + +struct scripting_context *scripting_context; + +static int stop_script_unsupported(void) +{ + return 0; +} + +static void process_event_unsupported(int cpu __unused, + void *data __unused, + int size __unused, + unsigned long long nsecs __unused, + char *comm __unused) +{ +} + +static void print_perl_unsupported_msg(void) +{ + fprintf(stderr, "Perl scripting not supported." + " Install libperl and rebuild perf to enable it.\n" + "For example:\n # apt-get install libperl-dev (ubuntu)" + "\n # yum install 'perl(ExtUtils::Embed)' (Fedora)" + "\n etc.\n"); +} + +static int perl_start_script_unsupported(const char *script __unused, + int argc __unused, + const char **argv __unused) +{ + print_perl_unsupported_msg(); + + return -1; +} + +static int perl_generate_script_unsupported(const char *outfile __unused) +{ + print_perl_unsupported_msg(); + + return -1; +} + +struct scripting_ops perl_scripting_unsupported_ops = { + .name = "Perl", + .start_script = perl_start_script_unsupported, + .stop_script = stop_script_unsupported, + .process_event = process_event_unsupported, + .generate_script = perl_generate_script_unsupported, +}; + +static void register_perl_scripting(struct scripting_ops *scripting_ops) +{ + int err; + err = script_spec_register("Perl", scripting_ops); + if (err) + die("error registering Perl script extension"); + + err = script_spec_register("pl", scripting_ops); + if (err) + die("error registering pl script extension"); + + scripting_context = malloc(sizeof(struct scripting_context)); +} + +#ifdef NO_LIBPERL +void setup_perl_scripting(void) +{ + register_perl_scripting(&perl_scripting_unsupported_ops); +} +#else +struct scripting_ops perl_scripting_ops; + +void setup_perl_scripting(void) +{ + register_perl_scripting(&perl_scripting_ops); +} +#endif -- cgit v1.2.3-58-ga151 From 266fe2f217d1dc9f8041e395c0ab4569a5bad91a Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:56 -0600 Subject: perf/scripts: Remove check-perf-trace from listed scripts The check-perf-trace script only checks Perl functionality, and doesn't really need to be listed as as user script anyway. 
This only removes the '-report' shell script, so although it doesn't appear in the listing, the '-record' shell script and the check-perf-trace Perl script itself are still available and can still be run manually, like so: $ libexec/perf-core/scripts/perl/bin/check-perf-trace-record $ perf trace -s libexec/perf-core/scripts/perl/check-perf-trace.pl Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-6-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/scripts/perl/bin/check-perf-trace-record | 5 ----- tools/perf/scripts/perl/bin/check-perf-trace-report | 6 ------ 2 files changed, 11 deletions(-) delete mode 100644 tools/perf/scripts/perl/bin/check-perf-trace-report diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record index c7ec5de2f535..3c1574498942 100644 --- a/tools/perf/scripts/perl/bin/check-perf-trace-record +++ b/tools/perf/scripts/perl/bin/check-perf-trace-record @@ -1,7 +1,2 @@ #!/bin/bash perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry - - - - - diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report deleted file mode 100644 index 7fc4a033dd49..000000000000 --- a/tools/perf/scripts/perl/bin/check-perf-trace-report +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -# description: useless but exhaustive test script -perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl - - - -- cgit v1.2.3-58-ga151 From 7e4b21b84c43bb8a80b916e40718ca4ed1fc52e6 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:57 -0600 Subject: perf/scripts: Add Python scripting engine Add base support for Python scripting to perf trace.
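The engine embeds CPython (the 2.x C API, as the PyCObject/PyString calls below show): the script is executed once at start-up, and every trace event is then handed to a Python function named '<system>__<event>' (for example kmem__kmalloc), with trace_unhandled() as the fallback, mirroring the Perl engine. Stripped of perf specifics, the start-up sequence that python_start_script() relies on boils down to the following standalone sketch (illustrative only, with error handling reduced to a minimum):

#include <Python.h>   /* Python 2.x embedding API */
#include <stdio.h>

static int run_trace_script(const char *script, int argc, char **argv)
{
	FILE *fp;
	int err;

	Py_Initialize();
	PySys_SetArgv(argc, argv);   /* let the script see its own argv */

	fp = fopen(script, "r");
	if (fp == NULL) {
		perror(script);
		Py_Finalize();
		return -1;
	}

	/* run the script top-level, defining trace_begin(), handlers, ... */
	err = PyRun_SimpleFile(fp, script);
	fclose(fp);
	Py_Finalize();

	return err ? -1 : 0;
}

Per event, the engine builds a tuple of (handler name, context, cpu, secs, nsecs, pid, comm, field values...) and looks the handler up in the script's __main__ dictionary, the Python counterpart of the Perl engine's call_pv() dispatch.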
Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-6-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Makefile | 21 + tools/perf/builtin-trace.c | 1 + .../perf/scripts/python/Perf-Trace-Util/Context.c | 88 ++++ .../python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 91 ++++ .../python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 25 + .../util/scripting-engines/trace-event-python.c | 576 +++++++++++++++++++++ tools/perf/util/trace-event-scripting.c | 61 +++ tools/perf/util/trace-event.h | 1 + 8 files changed, 864 insertions(+) create mode 100644 tools/perf/scripts/python/Perf-Trace-Util/Context.c create mode 100644 tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py create mode 100644 tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py create mode 100644 tools/perf/util/scripting-engines/trace-event-python.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0a3c0c8b3fc0..14273164db04 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -522,6 +522,19 @@ else LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o endif +ifndef NO_LIBPYTHON +PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null` +PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` +endif + +ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y) + BASIC_CFLAGS += -DNO_LIBPYTHON +else + ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS) + LIB_OBJS += util/scripting-engines/trace-event-python.o + LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o +endif + ifdef NO_DEMANGLE BASIC_CFLAGS += -DNO_DEMANGLE else @@ -899,6 +912,12 @@ util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-pe scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< +util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< + +scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) @@ -1012,6 +1031,8 @@ install: all $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' + $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' ifdef BUILT_INS $(INSTALL) -d -m 755 
'$(DESTDIR_SQ)$(perfexec_instdir_SQ)' diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d5d20c34e221..5db687fc13de 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -44,6 +44,7 @@ static void setup_scripting(void) perf_set_argv_exec_path(perf_exec_path()); setup_perl_scripting(); + setup_python_scripting(); scripting_ops = &default_scripting_ops; } diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c new file mode 100644 index 000000000000..957085dd5d8d --- /dev/null +++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c @@ -0,0 +1,88 @@ +/* + * Context.c. Python interfaces for perf trace. + * + * Copyright (C) 2010 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include "../../../perf.h" +#include "../../../util/trace-event.h" + +PyMODINIT_FUNC initperf_trace_context(void); + +static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args) +{ + static struct scripting_context *scripting_context; + PyObject *context; + int retval; + + if (!PyArg_ParseTuple(args, "O", &context)) + return NULL; + + scripting_context = PyCObject_AsVoidPtr(context); + retval = common_pc(scripting_context); + + return Py_BuildValue("i", retval); +} + +static PyObject *perf_trace_context_common_flags(PyObject *self, + PyObject *args) +{ + static struct scripting_context *scripting_context; + PyObject *context; + int retval; + + if (!PyArg_ParseTuple(args, "O", &context)) + return NULL; + + scripting_context = PyCObject_AsVoidPtr(context); + retval = common_flags(scripting_context); + + return Py_BuildValue("i", retval); +} + +static PyObject *perf_trace_context_common_lock_depth(PyObject *self, + PyObject *args) +{ + static struct scripting_context *scripting_context; + PyObject *context; + int retval; + + if (!PyArg_ParseTuple(args, "O", &context)) + return NULL; + + scripting_context = PyCObject_AsVoidPtr(context); + retval = common_lock_depth(scripting_context); + + return Py_BuildValue("i", retval); +} + +static PyMethodDef ContextMethods[] = { + { "common_pc", perf_trace_context_common_pc, METH_VARARGS, + "Get the common preempt count event field value."}, + { "common_flags", perf_trace_context_common_flags, METH_VARARGS, + "Get the common flags event field value."}, + { "common_lock_depth", perf_trace_context_common_lock_depth, + METH_VARARGS, "Get the common lock depth event field value."}, + { NULL, NULL, 0, NULL} +}; + +PyMODINIT_FUNC initperf_trace_context(void) +{ + (void) Py_InitModule("perf_trace_context", ContextMethods); +} diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py new file mode 100644 index 000000000000..1dc464ee2ca8 --- /dev/null +++ 
b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py @@ -0,0 +1,91 @@ +# Core.py - Python extension for perf trace, core functions +# +# Copyright (C) 2010 by Tom Zanussi +# +# This software may be distributed under the terms of the GNU General +# Public License ("GPL") version 2 as published by the Free Software +# Foundation. + +from collections import defaultdict + +def autodict(): + return defaultdict(autodict) + +flag_fields = autodict() +symbolic_fields = autodict() + +def define_flag_field(event_name, field_name, delim): + flag_fields[event_name][field_name]['delim'] = delim + +def define_flag_value(event_name, field_name, value, field_str): + flag_fields[event_name][field_name]['values'][value] = field_str + +def define_symbolic_field(event_name, field_name): + # nothing to do, really + pass + +def define_symbolic_value(event_name, field_name, value, field_str): + symbolic_fields[event_name][field_name]['values'][value] = field_str + +def flag_str(event_name, field_name, value): + string = "" + + if flag_fields[event_name][field_name]: + print_delim = 0 + keys = flag_fields[event_name][field_name]['values'].keys() + keys.sort() + for idx in keys: + if not value and not idx: + string += flag_fields[event_name][field_name]['values'][idx] + break + if idx and (value & idx) == idx: + if print_delim and flag_fields[event_name][field_name]['delim']: + string += " " + flag_fields[event_name][field_name]['delim'] + " " + string += flag_fields[event_name][field_name]['values'][idx] + print_delim = 1 + value &= ~idx + + return string + +def symbol_str(event_name, field_name, value): + string = "" + + if symbolic_fields[event_name][field_name]: + keys = symbolic_fields[event_name][field_name]['values'].keys() + keys.sort() + for idx in keys: + if not value and not idx: + string = symbolic_fields[event_name][field_name]['values'][idx] + break + if (value == idx): + string = symbolic_fields[event_name][field_name]['values'][idx] + break + + return string + +trace_flags = { 0x00: "NONE", \ + 0x01: "IRQS_OFF", \ + 0x02: "IRQS_NOSUPPORT", \ + 0x04: "NEED_RESCHED", \ + 0x08: "HARDIRQ", \ + 0x10: "SOFTIRQ" } + +def trace_flag_str(value): + string = "" + print_delim = 0 + + keys = trace_flags.keys() + + for idx in keys: + if not value and not idx: + string += "NONE" + break + + if idx and (value & idx) == idx: + if print_delim: + string += " | "; + string += trace_flags[idx] + print_delim = 1 + value &= ~idx + + return string diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py new file mode 100644 index 000000000000..83e91435ed09 --- /dev/null +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -0,0 +1,25 @@ +# Util.py - Python extension for perf trace, miscellaneous utility code +# +# Copyright (C) 2010 by Tom Zanussi +# +# This software may be distributed under the terms of the GNU General +# Public License ("GPL") version 2 as published by the Free Software +# Foundation. 
+ +NSECS_PER_SEC = 1000000000 + +def avg(total, n): + return total / n + +def nsecs(secs, nsecs): + return secs * NSECS_PER_SEC + nsecs + +def nsecs_secs(nsecs): + return nsecs / NSECS_PER_SEC + +def nsecs_nsecs(nsecs): + return nsecs % NSECS_PER_SEC + +def nsecs_str(nsecs): + str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)), + return str diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c new file mode 100644 index 000000000000..d402f64f9b46 --- /dev/null +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -0,0 +1,576 @@ +/* + * trace-event-python. Feed trace events to an embedded Python interpreter. + * + * Copyright (C) 2010 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include + +#include "../../perf.h" +#include "../util.h" +#include "../trace-event.h" + +PyMODINIT_FUNC initperf_trace_context(void); + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(unsigned short) * 8)) - 1) + +struct event *events[FTRACE_MAX_EVENT]; + +#define MAX_FIELDS 64 +#define N_COMMON_FIELDS 7 + +extern struct scripting_context *scripting_context; + +static char *cur_field_name; +static int zero_flag_atom; + +static PyObject *main_module, *main_dict; + +static void handler_call_die(const char *handler_name) +{ + PyErr_Print(); + Py_FatalError("problem in Python trace event handler"); +} + +static void define_value(enum print_arg_type field_type, + const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + const char *handler_name = "define_flag_value"; + PyObject *handler, *t, *retval; + unsigned long long value; + unsigned n = 0; + + if (field_type == PRINT_SYMBOL) + handler_name = "define_symbolic_value"; + + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + value = eval_flag(field_value); + + PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); + PyTuple_SetItem(t, n++, PyString_FromString(field_name)); + PyTuple_SetItem(t, n++, PyInt_FromLong(value)); + PyTuple_SetItem(t, n++, PyString_FromString(field_str)); + + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + handler = PyDict_GetItemString(main_dict, handler_name); + if (handler && PyCallable_Check(handler)) { + retval = PyObject_CallObject(handler, t); + if (retval == NULL) + handler_call_die(handler_name); + } + + Py_DECREF(t); +} + +static void define_values(enum print_arg_type field_type, + struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_value(field_type, ev_name, field_name, field->value, + field->str); + + if (field->next) + define_values(field_type, field->next, ev_name, field_name); +} + +static void define_field(enum print_arg_type field_type, + 
const char *ev_name, + const char *field_name, + const char *delim) +{ + const char *handler_name = "define_flag_field"; + PyObject *handler, *t, *retval; + unsigned n = 0; + + if (field_type == PRINT_SYMBOL) + handler_name = "define_symbolic_field"; + + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); + PyTuple_SetItem(t, n++, PyString_FromString(field_name)); + if (field_type == PRINT_FLAGS) + PyTuple_SetItem(t, n++, PyString_FromString(delim)); + + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + handler = PyDict_GetItemString(main_dict, handler_name); + if (handler && PyCallable_Check(handler)) { + retval = PyObject_CallObject(handler, t); + if (retval == NULL) + handler_call_die(handler_name); + } + + Py_DECREF(t); +} + +static void define_event_symbols(struct event *event, + const char *ev_name, + struct print_arg *args) +{ + switch (args->type) { + case PRINT_NULL: + break; + case PRINT_ATOM: + define_value(PRINT_FLAGS, ev_name, cur_field_name, "0", + args->atom.atom); + zero_flag_atom = 0; + break; + case PRINT_FIELD: + if (cur_field_name) + free(cur_field_name); + cur_field_name = strdup(args->field.name); + break; + case PRINT_FLAGS: + define_event_symbols(event, ev_name, args->flags.field); + define_field(PRINT_FLAGS, ev_name, cur_field_name, + args->flags.delim); + define_values(PRINT_FLAGS, args->flags.flags, ev_name, + cur_field_name); + break; + case PRINT_SYMBOL: + define_event_symbols(event, ev_name, args->symbol.field); + define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL); + define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name, + cur_field_name); + break; + case PRINT_STRING: + break; + case PRINT_TYPE: + define_event_symbols(event, ev_name, args->typecast.item); + break; + case PRINT_OP: + if (strcmp(args->op.op, ":") == 0) + zero_flag_atom = 1; + define_event_symbols(event, ev_name, args->op.left); + define_event_symbols(event, ev_name, args->op.right); + break; + default: + /* we should warn... */ + return; + } + + if (args->next) + define_event_symbols(event, ev_name, args->next); +} + +static inline struct event *find_cache_event(int type) +{ + static char ev_name[256]; + struct event *event; + + if (events[type]) + return events[type]; + + events[type] = event = trace_find_event(type); + if (!event) + return NULL; + + sprintf(ev_name, "%s__%s", event->system, event->name); + + define_event_symbols(event, ev_name, event->print_fmt.args); + + return event; +} + +static void python_process_event(int cpu, void *data, + int size __unused, + unsigned long long nsecs, char *comm) +{ + PyObject *handler, *retval, *context, *t; + static char handler_name[256]; + struct format_field *field; + unsigned long long val; + unsigned long s, ns; + struct event *event; + unsigned n = 0; + int type; + int pid; + + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + type = trace_parse_common_type(data); + + event = find_cache_event(type); + if (!event) + die("ug! 
no event found for type %d", type); + + pid = trace_parse_common_pid(data); + + sprintf(handler_name, "%s__%s", event->system, event->name); + + s = nsecs / NSECS_PER_SEC; + ns = nsecs - s * NSECS_PER_SEC; + + scripting_context->event_data = data; + + context = PyCObject_FromVoidPtr(scripting_context, NULL); + + PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); + PyTuple_SetItem(t, n++, + PyCObject_FromVoidPtr(scripting_context, NULL)); + PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); + PyTuple_SetItem(t, n++, PyInt_FromLong(s)); + PyTuple_SetItem(t, n++, PyInt_FromLong(ns)); + PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); + PyTuple_SetItem(t, n++, PyString_FromString(comm)); + + for (field = event->format.fields; field; field = field->next) { + if (field->flags & FIELD_IS_STRING) { + int offset; + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(data + field->offset); + offset &= 0xffff; + } else + offset = field->offset; + PyTuple_SetItem(t, n++, + PyString_FromString((char *)data + offset)); + } else { /* FIELD_IS_NUMERIC */ + val = read_size(data + field->offset, field->size); + if (field->flags & FIELD_IS_SIGNED) { + PyTuple_SetItem(t, n++, PyInt_FromLong(val)); + } else { + PyTuple_SetItem(t, n++, PyInt_FromLong(val)); + } + } + } + + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + handler = PyDict_GetItemString(main_dict, handler_name); + if (handler && PyCallable_Check(handler)) { + retval = PyObject_CallObject(handler, t); + if (retval == NULL) + handler_call_die(handler_name); + } else { + handler = PyDict_GetItemString(main_dict, "trace_unhandled"); + if (handler && PyCallable_Check(handler)) { + if (_PyTuple_Resize(&t, N_COMMON_FIELDS) == -1) + Py_FatalError("error resizing Python tuple"); + + retval = PyObject_CallObject(handler, t); + if (retval == NULL) + handler_call_die("trace_unhandled"); + } + } + + Py_DECREF(t); +} + +static int run_start_sub(void) +{ + PyObject *handler, *retval; + int err = 0; + + main_module = PyImport_AddModule("__main__"); + if (main_module == NULL) + return -1; + Py_INCREF(main_module); + + main_dict = PyModule_GetDict(main_module); + if (main_dict == NULL) { + err = -1; + goto error; + } + Py_INCREF(main_dict); + + handler = PyDict_GetItemString(main_dict, "trace_begin"); + if (handler == NULL || !PyCallable_Check(handler)) + goto out; + + retval = PyObject_CallObject(handler, NULL); + if (retval == NULL) + handler_call_die("trace_begin"); + + Py_DECREF(retval); + return err; +error: + Py_XDECREF(main_dict); + Py_XDECREF(main_module); +out: + return err; +} + +/* + * Start trace script + */ +static int python_start_script(const char *script, int argc, const char **argv) +{ + const char **command_line; + char buf[PATH_MAX]; + int i, err = 0; + FILE *fp; + + command_line = malloc((argc + 1) * sizeof(const char *)); + command_line[0] = script; + for (i = 1; i < argc + 1; i++) + command_line[i] = argv[i - 1]; + + Py_Initialize(); + + initperf_trace_context(); + + PySys_SetArgv(argc + 1, (char **)command_line); + + fp = fopen(script, "r"); + if (!fp) { + sprintf(buf, "Can't open python script \"%s\"", script); + perror(buf); + err = -1; + goto error; + } + + err = PyRun_SimpleFile(fp, script); + if (err) { + fprintf(stderr, "Error running python script %s\n", script); + goto error; + } + + err = run_start_sub(); + if (err) { + fprintf(stderr, "Error starting python script %s\n", script); + goto error; + } + + free(command_line); + fprintf(stderr, "perf trace started with Python script %s\n\n", + 
script); + + return err; +error: + Py_Finalize(); + free(command_line); + + return err; +} + +/* + * Stop trace script + */ +static int python_stop_script(void) +{ + PyObject *handler, *retval; + int err = 0; + + handler = PyDict_GetItemString(main_dict, "trace_end"); + if (handler == NULL || !PyCallable_Check(handler)) + goto out; + + retval = PyObject_CallObject(handler, NULL); + if (retval == NULL) + handler_call_die("trace_end"); + else + Py_DECREF(retval); +out: + Py_XDECREF(main_dict); + Py_XDECREF(main_module); + Py_Finalize(); + + fprintf(stderr, "\nperf trace Python script stopped\n"); + + return err; +} + +static int python_generate_script(const char *outfile) +{ + struct event *event = NULL; + struct format_field *f; + char fname[PATH_MAX]; + int not_first, count; + FILE *ofp; + + sprintf(fname, "%s.py", outfile); + ofp = fopen(fname, "w"); + if (ofp == NULL) { + fprintf(stderr, "couldn't open %s\n", fname); + return -1; + } + fprintf(ofp, "# perf trace event handlers, " + "generated by perf trace -g python\n"); + + fprintf(ofp, "# Licensed under the terms of the GNU GPL" + " License version 2\n\n"); + + fprintf(ofp, "# The common_* event handler fields are the most useful " + "fields common to\n"); + + fprintf(ofp, "# all events. They don't necessarily correspond to " + "the 'common_*' fields\n"); + + fprintf(ofp, "# in the format files. Those fields not available as " + "handler params can\n"); + + fprintf(ofp, "# be retrieved using Python functions of the form " + "common_*(context).\n"); + + fprintf(ofp, "# See the perf-trace-python Documentation for the list " + "of available functions.\n\n"); + + fprintf(ofp, "import os\n"); + fprintf(ofp, "import sys\n\n"); + + fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n"); + fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n"); + fprintf(ofp, "\nfrom perf_trace_context import *\n"); + fprintf(ofp, "from Core import *\n\n\n"); + + fprintf(ofp, "def trace_begin():\n"); + fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); + + fprintf(ofp, "def trace_end():\n"); + fprintf(ofp, "\tprint \"in trace_end\"\n\n"); + + while ((event = trace_find_next_event(event))) { + fprintf(ofp, "def %s__%s(", event->system, event->name); + fprintf(ofp, "event_name, "); + fprintf(ofp, "context, "); + fprintf(ofp, "common_cpu,\n"); + fprintf(ofp, "\tcommon_secs, "); + fprintf(ofp, "common_nsecs, "); + fprintf(ofp, "common_pid, "); + fprintf(ofp, "common_comm,\n\t"); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (++count % 5 == 0) + fprintf(ofp, "\n\t"); + + fprintf(ofp, "%s", f->name); + } + fprintf(ofp, "):\n"); + + fprintf(ofp, "\t\tprint_header(event_name, common_cpu, " + "common_secs, common_nsecs,\n\t\t\t" + "common_pid, common_comm)\n\n"); + + fprintf(ofp, "\t\tprint \""); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (count && count % 3 == 0) { + fprintf(ofp, "\" \\\n\t\t\""); + } + count++; + + fprintf(ofp, "%s=", f->name); + if (f->flags & FIELD_IS_STRING || + f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_SYMBOLIC) + fprintf(ofp, "%%s"); + else if (f->flags & FIELD_IS_SIGNED) + fprintf(ofp, "%%d"); + else + fprintf(ofp, "%%u"); + } + + fprintf(ofp, "\\n\" %% \\\n\t\t("); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + + if (++count % 5 == 0) + fprintf(ofp, "\n\t\t"); + 
+ if (f->flags & FIELD_IS_FLAG) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t\t"); + count = 4; + } + fprintf(ofp, "flag_str(\""); + fprintf(ofp, "%s__%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", %s)", f->name, + f->name); + } else if (f->flags & FIELD_IS_SYMBOLIC) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t\t"); + count = 4; + } + fprintf(ofp, "symbol_str(\""); + fprintf(ofp, "%s__%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", %s)", f->name, + f->name); + } else + fprintf(ofp, "%s", f->name); + } + + fprintf(ofp, "),\n\n"); + } + + fprintf(ofp, "def trace_unhandled(event_name, context, " + "common_cpu, common_secs, common_nsecs,\n\t\t" + "common_pid, common_comm):\n"); + + fprintf(ofp, "\t\tprint_header(event_name, common_cpu, " + "common_secs, common_nsecs,\n\t\tcommon_pid, " + "common_comm)\n\n"); + + fprintf(ofp, "def print_header(" + "event_name, cpu, secs, nsecs, pid, comm):\n" + "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" + "(event_name, cpu, secs, nsecs, pid, comm),\n"); + + fclose(ofp); + + fprintf(stderr, "generated Python script: %s\n", fname); + + return 0; +} + +struct scripting_ops python_scripting_ops = { + .name = "Python", + .start_script = python_start_script, + .stop_script = python_stop_script, + .process_event = python_process_event, + .generate_script = python_generate_script, +}; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 9e371965c034..7ea983acfaea 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -44,6 +44,67 @@ static void process_event_unsupported(int cpu __unused, { } +static void print_python_unsupported_msg(void) +{ + fprintf(stderr, "Python scripting not supported." + " Install libpython and rebuild perf to enable it.\n" + "For example:\n # apt-get install python-dev (ubuntu)" + "\n # yum install python-devel (Fedora)" + "\n etc.\n"); +} + +static int python_start_script_unsupported(const char *script __unused, + int argc __unused, + const char **argv __unused) +{ + print_python_unsupported_msg(); + + return -1; +} + +static int python_generate_script_unsupported(const char *outfile __unused) +{ + print_python_unsupported_msg(); + + return -1; +} + +struct scripting_ops python_scripting_unsupported_ops = { + .name = "Python", + .start_script = python_start_script_unsupported, + .stop_script = stop_script_unsupported, + .process_event = process_event_unsupported, + .generate_script = python_generate_script_unsupported, +}; + +static void register_python_scripting(struct scripting_ops *scripting_ops) +{ + int err; + err = script_spec_register("Python", scripting_ops); + if (err) + die("error registering Python script extension"); + + err = script_spec_register("py", scripting_ops); + if (err) + die("error registering py script extension"); + + scripting_context = malloc(sizeof(struct scripting_context)); +} + +#ifdef NO_LIBPYTHON +void setup_python_scripting(void) +{ + register_python_scripting(&python_scripting_unsupported_ops); +} +#else +struct scripting_ops python_scripting_ops; + +void setup_python_scripting(void) +{ + register_python_scripting(&python_scripting_ops); +} +#endif + static void print_perl_unsupported_msg(void) { fprintf(stderr, "Perl scripting not supported." 
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index aaf2da2d21e5..c3269b937db4 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -280,6 +280,7 @@ struct scripting_ops { int script_spec_register(const char *spec, struct scripting_ops *ops); void setup_perl_scripting(void); +void setup_python_scripting(void); struct scripting_context { void *event_data; -- cgit v1.2.3-58-ga151 From 4d161f0360d00d46a89827b3fd6da395f00c5d90 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:27:58 -0600 Subject: perf/scripts: Add syscall tracing scripts Adds a set of scripts that aggregate system call totals and system call errors. Most are Python scripts that also test basic functionality of the new Python engine, but there's also one Perl script added for comparison and for reference in some new Documentation contained in a later patch. Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-8-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Makefile | 3 + .../perf/scripts/perl/bin/check-perf-trace-record | 2 +- tools/perf/scripts/perl/bin/failed-syscalls-record | 2 + tools/perf/scripts/perl/bin/failed-syscalls-report | 4 ++ tools/perf/scripts/perl/failed-syscalls.pl | 38 ++++++++++ .../python/bin/failed-syscalls-by-pid-record | 2 + .../python/bin/failed-syscalls-by-pid-report | 4 ++ .../python/bin/syscall-counts-by-pid-record | 2 + .../python/bin/syscall-counts-by-pid-report | 4 ++ .../perf/scripts/python/bin/syscall-counts-record | 2 + .../perf/scripts/python/bin/syscall-counts-report | 4 ++ tools/perf/scripts/python/check-perf-trace.py | 83 ++++++++++++++++++++++ .../perf/scripts/python/failed-syscalls-by-pid.py | 68 ++++++++++++++++++ tools/perf/scripts/python/syscall-counts-by-pid.py | 64 +++++++++++++++++ tools/perf/scripts/python/syscall-counts.py | 58 +++++++++++++++ 15 files changed, 339 insertions(+), 1 deletion(-) create mode 100644 tools/perf/scripts/perl/bin/failed-syscalls-record create mode 100644 tools/perf/scripts/perl/bin/failed-syscalls-report create mode 100644 tools/perf/scripts/perl/failed-syscalls.pl create mode 100644 tools/perf/scripts/python/bin/failed-syscalls-by-pid-record create mode 100644 tools/perf/scripts/python/bin/failed-syscalls-by-pid-report create mode 100644 tools/perf/scripts/python/bin/syscall-counts-by-pid-record create mode 100644 tools/perf/scripts/python/bin/syscall-counts-by-pid-report create mode 100644 tools/perf/scripts/python/bin/syscall-counts-record create mode 100644 tools/perf/scripts/python/bin/syscall-counts-report create mode 100644 tools/perf/scripts/python/check-perf-trace.py create mode 100644 tools/perf/scripts/python/failed-syscalls-by-pid.py create mode 100644 tools/perf/scripts/python/syscall-counts-by-pid.py create mode 100644 tools/perf/scripts/python/syscall-counts.py diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 14273164db04..54a5b50ff312 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -1032,7 +1032,10 @@ install: all $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' $(INSTALL) 
scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' + $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' + $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' ifdef BUILT_INS $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record index 3c1574498942..e6cb1474f8e8 100644 --- a/tools/perf/scripts/perl/bin/check-perf-trace-record +++ b/tools/perf/scripts/perl/bin/check-perf-trace-record @@ -1,2 +1,2 @@ #!/bin/bash -perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry +perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record new file mode 100644 index 000000000000..f8885d389e6f --- /dev/null +++ b/tools/perf/scripts/perl/bin/failed-syscalls-record @@ -0,0 +1,2 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report new file mode 100644 index 000000000000..8bfc660e5056 --- /dev/null +++ b/tools/perf/scripts/perl/bin/failed-syscalls-report @@ -0,0 +1,4 @@ +#!/bin/bash +# description: system-wide failed syscalls +# args: [comm] +perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1 diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl new file mode 100644 index 000000000000..c18e7e27a84b --- /dev/null +++ b/tools/perf/scripts/perl/failed-syscalls.pl @@ -0,0 +1,38 @@ +# failed system call counts +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide failed system call totals +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. 
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+my %failed_syscalls;
+
+sub raw_syscalls::sys_exit
+{
+	my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+	    $common_pid, $common_comm,
+	    $id, $ret) = @_;
+
+	if ($ret < 0) {
+		$failed_syscalls{$common_comm}++;
+	}
+}
+
+sub trace_end
+{
+	printf("\nfailed syscalls by comm:\n\n");
+
+	printf("%-20s %10s\n", "comm", "# errors");
+	printf("%-20s %10s\n", "--------------------", "----------");
+
+	foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
+			  keys %failed_syscalls) {
+		printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
+	}
+}
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 000000000000..f8885d389e6f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 000000000000..1e0c0a860c87
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide failed syscalls, by pid
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 000000000000..45a8c50359da
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 000000000000..f8044d192271
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide syscall counts, by pid
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 000000000000..45a8c50359da
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 000000000000..a366aa61612f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: system-wide syscall counts
+# args: [comm]
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py $1
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 000000000000..964d934395ff
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,83 @@
+# perf trace event handlers, generated by perf trace -g python
+# (c) 2010, Tom Zanussi
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+#
strings, common_xxx() calls back into perf, begin, end, unhandled +# events, etc. Basically, if this script runs successfully and +# displays expected results, Python scripting support should be ok. + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from Core import * +from perf_trace_context import * + +unhandled = autodict() + +def trace_begin(): + print "trace_begin" + pass + +def trace_end(): + print_unhandled() + +def irq__softirq_entry(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + vec): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print_uncommon(context) + + print "vec=%s\n" % \ + (symbol_str("irq__softirq_entry", "vec", vec)), + +def kmem__kmalloc(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + call_site, ptr, bytes_req, bytes_alloc, + gfp_flags): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print_uncommon(context) + + print "call_site=%u, ptr=%u, bytes_req=%u, " \ + "bytes_alloc=%u, gfp_flags=%s\n" % \ + (call_site, ptr, bytes_req, bytes_alloc, + + flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)), + +def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, + common_pid, common_comm): + try: + unhandled[event_name] += 1 + except TypeError: + unhandled[event_name] = 1 + +def print_header(event_name, cpu, secs, nsecs, pid, comm): + print "%-20s %5u %05u.%09u %8u %-20s " % \ + (event_name, cpu, secs, nsecs, pid, comm), + +# print trace fields not included in handler args +def print_uncommon(context): + print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \ + % (common_pc(context), trace_flag_str(common_flags(context)), \ + common_lock_depth(context)) + +def print_unhandled(): + keys = unhandled.keys() + if not keys: + return + + print "\nunhandled events:\n\n", + + print "%-40s %10s\n" % ("event", "count"), + print "%-40s %10s\n" % ("----------------------------------------", \ + "-----------"), + + for event_name in keys: + print "%-40s %10d\n" % (event_name, unhandled[event_name]) diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py new file mode 100644 index 000000000000..0ca02278fe69 --- /dev/null +++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py @@ -0,0 +1,68 @@ +# failed system call counts, by pid +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide failed system call totals, broken down by pid. +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. 
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s failed-syscalls-by-pid.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+    sys.exit(usage)
+
+if len(sys.argv) > 1:
+    for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+    pass
+
+def trace_end():
+    print_error_totals()
+
+def raw_syscalls__sys_exit(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    id, ret):
+    if for_comm is not None:
+        if common_comm != for_comm:
+            return
+
+    if ret < 0:
+        try:
+            syscalls[common_comm][common_pid][id][ret] += 1
+        except TypeError:
+            syscalls[common_comm][common_pid][id][ret] = 1
+
+def print_error_totals():
+    if for_comm is not None:
+        print "\nsyscall errors for %s:\n\n" % (for_comm),
+    else:
+        print "\nsyscall errors:\n\n",
+
+    print "%-30s %10s\n" % ("comm [pid]", "count"),
+    print "%-30s %10s\n" % ("------------------------------", \
+        "----------"),
+
+    comm_keys = syscalls.keys()
+    for comm in comm_keys:
+        pid_keys = syscalls[comm].keys()
+        for pid in pid_keys:
+            print "\n%s [%d]\n" % (comm, pid),
+            id_keys = syscalls[comm][pid].keys()
+            for id in id_keys:
+                print "  syscall: %-16d\n" % (id),
+                ret_keys = syscalls[comm][pid][id].keys()
+                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
+                    print "      err = %-20d %10d\n" % (ret, val),
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 000000000000..af722d6a4b3f
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,64 @@
+# system call counts, by pid
+# (c) 2010, Tom Zanussi
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by pid.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+ +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * + +usage = "perf trace -s syscall-counts-by-pid.py [comm]\n"; + +for_comm = None + +if len(sys.argv) > 2: + sys.exit(usage) + +if len(sys.argv) > 1: + for_comm = sys.argv[1] + +syscalls = autodict() + +def trace_begin(): + pass + +def trace_end(): + print_syscall_totals() + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + if for_comm is not None: + if common_comm != for_comm: + return + try: + syscalls[common_comm][common_pid][id] += 1 + except TypeError: + syscalls[common_comm][common_pid][id] = 1 + +def print_syscall_totals(): + if for_comm is not None: + print "\nsyscall events for %s:\n\n" % (for_comm), + else: + print "\nsyscall events by comm/pid:\n\n", + + print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"), + print "%-40s %10s\n" % ("----------------------------------------", \ + "----------"), + + comm_keys = syscalls.keys() + for comm in comm_keys: + pid_keys = syscalls[comm].keys() + for pid in pid_keys: + print "\n%s [%d]\n" % (comm, pid), + id_keys = syscalls[comm][pid].keys() + for id, val in sorted(syscalls[comm][pid].iteritems(), \ + key = lambda(k, v): (v, k), reverse = True): + print " %-38d %10d\n" % (id, val), diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py new file mode 100644 index 000000000000..f977e85ff049 --- /dev/null +++ b/tools/perf/scripts/python/syscall-counts.py @@ -0,0 +1,58 @@ +# system call counts +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide system call totals, broken down by syscall. +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * + +usage = "perf trace -s syscall-counts.py [comm]\n"; + +for_comm = None + +if len(sys.argv) > 2: + sys.exit(usage) + +if len(sys.argv) > 1: + for_comm = sys.argv[1] + +syscalls = autodict() + +def trace_begin(): + pass + +def trace_end(): + print_syscall_totals() + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + if for_comm is not None: + if common_comm != for_comm: + return + try: + syscalls[id] += 1 + except TypeError: + syscalls[id] = 1 + +def print_syscall_totals(): + if for_comm is not None: + print "\nsyscall events for %s:\n\n" % (for_comm), + else: + print "\nsyscall events:\n\n", + + print "%-40s %10s\n" % ("event", "count"), + print "%-40s %10s\n" % ("----------------------------------------", \ + "-----------"), + + for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ + reverse = True): + print "%-40d %10d\n" % (id, val), -- cgit v1.2.3-58-ga151 From 44ad9cd8f0893b9ae0ac729a7dc2a1ebcd170ac6 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 22 Feb 2010 01:12:59 -0600 Subject: perf/scripts: Remove unnecessary PyTuple resizes If we know the size of a tuple in advance, there's no need to resize it - start out with the known size in the first place. 
Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1266822779.6426.4.camel@tropicana> Signed-off-by: Frederic Weisbecker --- tools/perf/util/scripting-engines/trace-event-python.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index d402f64f9b46..33a414bbba3e 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -68,7 +68,7 @@ static void define_value(enum print_arg_type field_type, if (field_type == PRINT_SYMBOL) handler_name = "define_symbolic_value"; - t = PyTuple_New(MAX_FIELDS); + t = PyTuple_New(4); if (!t) Py_FatalError("couldn't create Python tuple"); @@ -79,9 +79,6 @@ static void define_value(enum print_arg_type field_type, PyTuple_SetItem(t, n++, PyInt_FromLong(value)); PyTuple_SetItem(t, n++, PyString_FromString(field_str)); - if (_PyTuple_Resize(&t, n) == -1) - Py_FatalError("error resizing Python tuple"); - handler = PyDict_GetItemString(main_dict, handler_name); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); @@ -116,7 +113,10 @@ static void define_field(enum print_arg_type field_type, if (field_type == PRINT_SYMBOL) handler_name = "define_symbolic_field"; - t = PyTuple_New(MAX_FIELDS); + if (field_type == PRINT_FLAGS) + t = PyTuple_New(3); + else + t = PyTuple_New(2); if (!t) Py_FatalError("couldn't create Python tuple"); @@ -125,9 +125,6 @@ static void define_field(enum print_arg_type field_type, if (field_type == PRINT_FLAGS) PyTuple_SetItem(t, n++, PyString_FromString(delim)); - if (_PyTuple_Resize(&t, n) == -1) - Py_FatalError("error resizing Python tuple"); - handler = PyDict_GetItemString(main_dict, handler_name); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); -- cgit v1.2.3-58-ga151 From cff68e582237cae3cf456f01153202175961dfbe Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Jan 2010 02:28:03 -0600 Subject: perf/scripts: Add perf-trace-python Documentation Also small update to perf-trace-perl and perf-trace docs. 
Signed-off-by: Tom Zanussi Cc: Ingo Molnar Cc: Steven Rostedt Cc: Keiichi KII Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1264580883-15324-13-git-send-email-tzanussi@gmail.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Documentation/perf-trace-perl.txt | 3 +- tools/perf/Documentation/perf-trace-python.txt | 624 +++++++++++++++++++++++++ tools/perf/Documentation/perf-trace.txt | 11 +- 3 files changed, 636 insertions(+), 2 deletions(-) create mode 100644 tools/perf/Documentation/perf-trace-python.txt diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt index c5f55f439091..d2206c3c7aa6 100644 --- a/tools/perf/Documentation/perf-trace-perl.txt +++ b/tools/perf/Documentation/perf-trace-perl.txt @@ -8,7 +8,7 @@ perf-trace-perl - Process trace data with a Perl script SYNOPSIS -------- [verse] -'perf trace' [-s [lang]:script[.ext] ] +'perf trace' [-s [Perl]:script[.pl] ] DESCRIPTION ----------- @@ -213,6 +213,7 @@ Various utility functions for use with perf trace: nsecs_nsecs($nsecs) - returns nsecs remainder given nsecs nsecs_str($nsecs) - returns printable string in the form secs.nsecs avg($total, $n) - returns average given a sum and a total number of values + syscall_name($id) - returns the syscall name for the specified syscall_nr SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-trace-python.txt b/tools/perf/Documentation/perf-trace-python.txt new file mode 100644 index 000000000000..119d5deba1db --- /dev/null +++ b/tools/perf/Documentation/perf-trace-python.txt @@ -0,0 +1,624 @@ +perf-trace-python(1) +================== + +NAME +---- +perf-trace-python - Process trace data with a Python script + +SYNOPSIS +-------- +[verse] +'perf trace' [-s [Python]:script[.py] ] + +DESCRIPTION +----------- + +This perf trace option is used to process perf trace data using perf's +built-in Python interpreter. It reads and processes the input file and +displays the results of the trace analysis implemented in the given +Python script, if any. + +A QUICK EXAMPLE +--------------- + +This section shows the process, start to finish, of creating a working +Python script that aggregates and extracts useful information from a +raw perf trace stream. You can avoid reading the rest of this +document if an example is enough for you; the rest of the document +provides more details on each step and lists the library functions +available to script writers. + +This example actually details the steps that were used to create the +'syscall-counts' script you see when you list the available perf trace +scripts via 'perf trace -l'. As such, this script also shows how to +integrate your script into the list of general-purpose 'perf trace' +scripts listed by that command. + +The syscall-counts script is a simple script, but demonstrates all the +basic ideas necessary to create a useful script. Here's an example +of its output: + +---- +syscall events: + +event count +---------------------------------------- ----------- +sys_write 455067 +sys_getdents 4072 +sys_close 3037 +sys_swapoff 1769 +sys_read 923 +sys_sched_setparam 826 +sys_open 331 +sys_newfstat 326 +sys_mmap 217 +sys_munmap 216 +sys_futex 141 +sys_select 102 +sys_poll 84 +sys_setitimer 12 +sys_writev 8 +15 8 +sys_lseek 7 +sys_rt_sigprocmask 6 +sys_wait4 3 +sys_ioctl 3 +sys_set_robust_list 1 +sys_exit 1 +56 1 +sys_access 1 +---- + +Basically our task is to keep a per-syscall tally that gets updated +every time a system call occurs in the system. 
Our script will do +that, but first we need to record the data that will be processed by +that script. Theoretically, there are a couple of ways we could do +that: + +- we could enable every event under the tracing/events/syscalls + directory, but this is over 600 syscalls, well beyond the number + allowable by perf. These individual syscall events will however be + useful if we want to later use the guidance we get from the + general-purpose scripts to drill down and get more detail about + individual syscalls of interest. + +- we can enable the sys_enter and/or sys_exit syscalls found under + tracing/events/raw_syscalls. These are called for all syscalls; the + 'id' field can be used to distinguish between individual syscall + numbers. + +For this script, we only need to know that a syscall was entered; we +don't care how it exited, so we'll use 'perf record' to record only +the sys_enter events: + +---- +# perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter + +^C[ perf record: Woken up 1 times to write data ] +[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ] +---- + +The options basically say to collect data for every syscall event +system-wide and multiplex the per-cpu output into a single stream. +That single stream will be recorded in a file in the current directory +called perf.data. + +Once we have a perf.data file containing our data, we can use the -g +'perf trace' option to generate a Python script that will contain a +callback handler for each event type found in the perf.data trace +stream (for more details, see the STARTER SCRIPTS section). + +---- +# perf trace -g python +generated Python script: perf-trace.py + +The output file created also in the current directory is named +perf-trace.py. Here's the file in its entirety: + +# perf trace event handlers, generated by perf trace -g python +# Licensed under the terms of the GNU GPL License version 2 + +# The common_* event handler fields are the most useful fields common to +# all events. They don't necessarily correspond to the 'common_*' fields +# in the format files. Those fields not available as handler params can +# be retrieved using Python functions of the form common_*(context). +# See the perf-trace-python Documentation for the list of available functions. + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * + +def trace_begin(): + print "in trace_begin" + +def trace_end(): + print "in trace_end" + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print "id=%d, args=%s\n" % \ + (id, args), + +def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, + common_pid, common_comm): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + +def print_header(event_name, cpu, secs, nsecs, pid, comm): + print "%-20s %5u %05u.%09u %8u %-20s " % \ + (event_name, cpu, secs, nsecs, pid, comm), +---- + +At the top is a comment block followed by some import statements and a +path append which every perf trace script should include. + +Following that are a couple generated functions, trace_begin() and +trace_end(), which are called at the beginning and the end of the +script respectively (for more details, see the SCRIPT_LAYOUT section +below). 
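+
+The generated trace_begin() and trace_end() above only print a
+message, but they are the natural place for any setup and teardown a
+script needs. As a purely illustrative sketch (the timer and the
+start_time variable below are not produced by 'perf trace -g'; they
+are only an assumed example), trace_begin() could note when
+processing started and trace_end() could report how long it took:
+
+----
+import time
+
+start_time = None
+
+def trace_begin():
+    global start_time
+    # remember when event processing started
+    start_time = time.time()
+
+def trace_end():
+    # report how long the trace took to process
+    print "processed trace in %f seconds" % (time.time() - start_time)
+----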
+
+Following those are the 'event handler' functions generated one for
+every event in the 'perf record' output. The handler functions take
+the form subsystem__event_name, and contain named parameters, one for
+each field in the event; in this case, there's only one event,
+raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
+more info on event handlers).
+
+The final couple of functions are, like the begin and end functions,
+generated for every script. The first, trace_unhandled(), is called
+every time the script finds an event in the perf.data file that
+doesn't correspond to any event handler in the script. This could
+mean either that the record step recorded event types that it wasn't
+really interested in, or the script was run against a trace file that
+doesn't correspond to the script.
+
+The script generated by the -g option simply prints a line for each
+event found in the trace stream i.e. it basically just dumps the event
+and its parameter values to stdout. The print_header() function is
+simply a utility function used for that purpose. Let's rename the
+script and run it to see the default output:
+
+----
+# mv perf-trace.py syscall-counts.py
+# perf trace -s syscall-counts.py
+
+raw_syscalls__sys_enter 1 00840.847582083 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847595764 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847620860 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847710478 6533 npviewer.bin id=78, args=
+raw_syscalls__sys_enter 1 00840.847719204 6533 npviewer.bin id=142, args=
+raw_syscalls__sys_enter 1 00840.847755445 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847775601 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847781820 6533 npviewer.bin id=3, args=
+.
+.
+.
+----
+
+Of course, for this script, we're not interested in printing every
+trace event, but rather aggregating it in a useful way. So we'll get
+rid of everything to do with printing as well as the trace_begin() and
+trace_unhandled() functions, which we won't be using. That leaves us
+with this minimalistic skeleton:
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_end():
+    print "in trace_end"
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    id, args):
+----
+
+In trace_end(), we'll simply print the results, but first we need to
+generate some results to print. To do that we need to have our
+sys_enter() handler do the necessary tallying until all events have
+been counted. A hash table indexed by syscall id is a good way to
+store that information; every time the sys_enter() handler is called,
+we simply increment a count associated with that hash entry indexed by
+that syscall id:
+
+----
+  syscalls = autodict()
+
+  try:
+    syscalls[id] += 1
+  except TypeError:
+    syscalls[id] = 1
+----
+
+The syscalls 'autodict' object is a special kind of Python dictionary
+(implemented in Core.py) that implements Perl's 'autovivifying' hashes
+in Python i.e.
with autovivifying hashes, you can assign nested hash
+values without having to go to the trouble of creating intermediate
+levels if they don't exist e.g. syscalls[comm][pid][id] = 1 will create
+the intermediate hash levels and finally assign the value 1 to the
+hash entry for 'id' (because the value being assigned isn't a hash
+object itself, the initial value is assigned in the TypeError
+exception. Well, there may be a better way to do this in Python but
+that's what works for now).
+
+Putting that code into the raw_syscalls__sys_enter() handler, we
+effectively end up with a single-level dictionary keyed on syscall id
+and having the counts we've tallied as values.
+
+The print_syscall_totals() function iterates over the entries in the
+dictionary and displays a line for each entry containing the syscall
+name (the dictionary keys contain the syscall ids, which are passed to
+the Util function syscall_name(), which translates the raw syscall
+numbers to the corresponding syscall name strings). The output is
+displayed after all the events in the trace have been processed, by
+calling the print_syscall_totals() function from the trace_end()
+handler called at the end of script processing.
+
+The final script producing the output shown above is shown in its
+entirety below:
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+syscalls = autodict()
+
+def trace_end():
+    print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    id, args):
+    try:
+        syscalls[id] += 1
+    except TypeError:
+        syscalls[id] = 1
+
+def print_syscall_totals():
+    if for_comm is not None:
+        print "\nsyscall events for %s:\n\n" % (for_comm),
+    else:
+        print "\nsyscall events:\n\n",
+
+    print "%-40s %10s\n" % ("event", "count"),
+    print "%-40s %10s\n" % ("----------------------------------------", \
+        "-----------"),
+
+    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+        reverse = True):
+        print "%-40s %10d\n" % (syscall_name(id), val),
+----
+
+The script can be run just as before:
+
+  # perf trace -s syscall-counts.py
+
+So those are the essential steps in writing and running a script. The
+process can be generalized to any tracepoint or set of tracepoints
+you're interested in - basically find the tracepoint(s) you're
+interested in by looking at the list of available events shown by
+'perf list' and/or look in /sys/kernel/debug/tracing/events for
+detailed event and field info, record the corresponding trace data
+using 'perf record', passing it the list of interesting events,
+generate a skeleton script using 'perf trace -g python' and modify the
+code to aggregate and display it for your particular needs.
+
+After you've done that you may end up with a general-purpose script
+that you want to keep around and have available for future use.
By +writing a couple of very simple shell scripts and putting them in the +right place, you can have your script listed alongside the other +scripts listed by the 'perf trace -l' command e.g.: + +---- +root@tropicana:~# perf trace -l +List of available trace scripts: + workqueue-stats workqueue stats (ins/exe/create/destroy) + wakeup-latency system-wide min/max/avg wakeup latency + rw-by-file r/w activity for a program, by file + rw-by-pid system-wide r/w activity +---- + +A nice side effect of doing this is that you also then capture the +probably lengthy 'perf record' command needed to record the events for +the script. + +To have the script appear as a 'built-in' script, you write two simple +scripts, one for recording and one for 'reporting'. + +The 'record' script is a shell script with the same base name as your +script, but with -record appended. The shell script should be put +into the perf/scripts/python/bin directory in the kernel source tree. +In that script, you write the 'perf record' command-line needed for +your script: + +---- +# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record + +#!/bin/bash +perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter +---- + +The 'report' script is also a shell script with the same base name as +your script, but with -report appended. It should also be located in +the perf/scripts/python/bin directory. In that script, you write the +'perf trace -s' command-line needed for running your script: + +---- +# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report + +#!/bin/bash +# description: system-wide syscall counts +perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py +---- + +Note that the location of the Python script given in the shell script +is in the libexec/perf-core/scripts/python directory - this is where +the script will be copied by 'make install' when you install perf. +For the installation to install your script there, your script needs +to be located in the perf/scripts/python directory in the kernel +source tree: + +---- +# ls -al kernel-source/tools/perf/scripts/python + +root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python +total 32 +drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . +drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. +drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin +-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-trace.py +drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util +-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py +---- + +Once you've done that (don't forget to do a new 'make install', +otherwise your script won't show up at run-time), 'perf trace -l' +should show a new entry for your script: + +---- +root@tropicana:~# perf trace -l +List of available trace scripts: + workqueue-stats workqueue stats (ins/exe/create/destroy) + wakeup-latency system-wide min/max/avg wakeup latency + rw-by-file r/w activity for a program, by file + rw-by-pid system-wide r/w activity + syscall-counts system-wide syscall counts +---- + +You can now perform the record step via 'perf trace record': + + # perf trace record syscall-counts + +and display the output using 'perf trace report': + + # perf trace report syscall-counts + +STARTER SCRIPTS +--------------- + +You can quickly get started writing a script for a particular set of +trace data by generating a skeleton script using 'perf trace -g +python' in the same directory as an existing perf.data trace file. 
+That will generate a starter script containing a handler for each of
+the event types in the trace file; it simply prints every available
+field for each event in the trace file.
+
+You can also look at the existing scripts in
+~/libexec/perf-core/scripts/python for typical examples showing how to
+do basic things like aggregate event data, print results, etc. Also,
+the check-perf-trace.py script, while not interesting for its results,
+attempts to exercise all of the main scripting features.
+
+EVENT HANDLERS
+--------------
+
+When perf trace is invoked using a trace script, a user-defined
+'handler function' is called for each event in the trace. If there's
+no handler function defined for a given event type, the event is
+ignored (or passed to a 'trace_unhandled' function, see below) and the
+next event is processed.
+
+Most of the event's field values are passed as arguments to the
+handler function; some of the less common ones aren't - those are
+available as calls back into the perf executable (see below).
+
+As an example, the following perf record command can be used to record
+all sched_wakeup events in the system:
+
+  # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
+
+Traces meant to be processed using a script should be recorded with
+the above options: -c 1 says to sample every event, -a to enable
+system-wide collection, -M to multiplex the output, and -R to collect
+raw samples.
+
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+
+----
+ format:
+    field:unsigned short common_type;
+    field:unsigned char common_flags;
+    field:unsigned char common_preempt_count;
+    field:int common_pid;
+    field:int common_lock_depth;
+
+    field:char comm[TASK_COMM_LEN];
+    field:pid_t pid;
+    field:int prio;
+    field:int success;
+    field:int target_cpu;
+----
+
+The handler function for this event would be defined as:
+
+----
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+        common_nsecs, common_pid, common_comm,
+        comm, pid, prio, success, target_cpu):
+    pass
+----
+
+The handler function takes the form subsystem__event_name.
+
+The common_* arguments in the handler's argument list are the set of
+arguments passed to all event handlers; some of the fields correspond
+to the common_* fields in the format file, but some are synthesized,
+and some of the common_* fields aren't common enough to be passed
+to every event as arguments but are available as library functions.
+
+Here's a brief description of each of the invariant event args:
+
+ event_name    the name of the event as text
+ context       an opaque 'cookie' used in calls back into perf
+ common_cpu    the cpu the event occurred on
+ common_secs   the secs portion of the event timestamp
+ common_nsecs  the nsecs portion of the event timestamp
+ common_pid    the pid of the current task
+ common_comm   the name of the current process
+
+All of the remaining fields in the event's format file have
+counterparts as handler function arguments of the same name, as can be
+seen in the example above.
+
+The above provides the basics needed to directly access every field of
+every event in a trace, which covers 90% of what you need to know to
+write a useful trace script. The sections below cover the rest.
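+
+To tie the pieces of this section together, a handler built from the
+sched_wakeup description above could tally wakeups per woken task.
+This is only an illustrative sketch, not one of the shipped scripts;
+the 'wakeups' dictionary and the output format are assumed here:
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+wakeups = autodict()
+
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+        common_nsecs, common_pid, common_comm,
+        comm, pid, prio, success, target_cpu):
+    # tally one wakeup for the task being woken
+    try:
+        wakeups[comm] += 1
+    except TypeError:
+        wakeups[comm] = 1
+
+def trace_end():
+    for comm in wakeups.keys():
+        print "%-20s %10d" % (comm, wakeups[comm])
+----
+
+Sorting the totals in trace_end(), or filtering on a particular comm,
+can be layered on in the same way the syscall-counts scripts do it.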
+
+SCRIPT LAYOUT
+-------------
+
+Every perf trace Python script should start by setting up a Python
+module search path and 'import'ing a few support modules (see module
+descriptions below):
+
+----
+ import os
+ import sys
+
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+     '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+ from perf_trace_context import *
+ from Core import *
+----
+
+The rest of the script can contain handler functions and support
+functions in any order.
+
+Aside from the event handler functions discussed above, every script
+can implement a set of optional functions:
+
+*trace_begin*, if defined, is called before any event is processed and
+gives scripts a chance to do setup tasks:
+
+----
+def trace_begin():
+    pass
+----
+
+*trace_end*, if defined, is called after all events have been
+ processed and gives scripts a chance to do end-of-script tasks, such
+ as display results:
+
+----
+def trace_end():
+    pass
+----
+
+*trace_unhandled*, if defined, is called for any event that
+ doesn't have a handler explicitly defined for it. The standard set
+ of common arguments are passed into it:
+
+----
+def trace_unhandled(event_name, context, common_cpu, common_secs,
+        common_nsecs, common_pid, common_comm):
+    pass
+----
+
+The remaining sections provide descriptions of each of the available
+built-in perf trace Python modules and their associated functions.
+
+AVAILABLE MODULES AND FUNCTIONS
+-------------------------------
+
+The following sections describe the functions and variables available
+via the various perf trace Python modules. To use the functions and
+variables from the given module, add the corresponding 'from XXXX
+import' line to your perf trace script.
+
+Core.py Module
+~~~~~~~~~~~~~~
+
+This module provides some essential functions to user scripts.
+
+The *flag_str* and *symbol_str* functions provide human-readable
+strings for flag and symbolic fields. These correspond to the strings
+and values parsed from the 'print fmt' fields of the event format
+files:
+
+  flag_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the flag field field_name of event event_name
+  symbol_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the symbolic field field_name of event event_name
+
+The *autodict* function returns a special kind of Python
+dictionary that implements Perl's 'autovivifying' hashes in Python
+i.e. with autovivifying hashes, you can assign nested hash values
+without having to go to the trouble of creating intermediate levels if
+they don't exist.
+
+  autodict() - returns an autovivifying dictionary instance
+
+
+perf_trace_context Module
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the 'common' fields in the event format file aren't all that
+common, but need to be made accessible to user scripts nonetheless.
+
+perf_trace_context defines a set of functions that can be used to
+access this data in the context of the current event. Each of these
+functions expects a context variable, which is the same as the
+context variable passed into every event handler as the second
+argument.
+ + common_pc(context) - returns common_preempt count for the current event + common_flags(context) - returns common_flags for the current event + common_lock_depth(context) - returns common_lock_depth for the current event + +Util.py Module +~~~~~~~~~~~~~~ + +Various utility functions for use with perf trace: + + nsecs(secs, nsecs) - returns total nsecs given secs/nsecs pair + nsecs_secs(nsecs) - returns whole secs portion given nsecs + nsecs_nsecs(nsecs) - returns nsecs remainder given nsecs + nsecs_str(nsecs) - returns printable string in the form secs.nsecs + avg(total, n) - returns average given a sum and a total number of values + syscall_name(id) - returns the syscall name for the specified syscall_nr + +SEE ALSO +-------- +linkperf:perf-trace[1] diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index c00a76fcb8d6..8879299cd9df 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt @@ -19,6 +19,11 @@ There are several variants of perf trace: 'perf trace' to see a detailed trace of the workload that was recorded. + You can also run a set of pre-canned scripts that aggregate and + summarize the raw trace data in various ways (the list of scripts is + available via 'perf trace -l'). The following variants allow you to + record and run those scripts: + 'perf trace record