author     Arnaldo Carvalho de Melo <acme@redhat.com>   2018-04-26 16:52:34 -0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2018-04-27 10:47:06 -0300
commit     3183f8ca304fd84096c44332f9bb699943beb6f1 (patch)
tree       be40e5bb5eb54f4ce203f606c509d733c81ee8fe /tools/perf/util/thread.c
parent     e9814df8645d82b6c5d185537f9510028e35c385 (diff)
perf symbols: Unify symbol maps
Remove the split of symbol tables for data (MAP__VARIABLE) and for functions (MAP__FUNCTION); it is unneeded, and there were various places doing two lookups to find a symbol, so simplify this.

We will still consider only the symbols that matched the filters in place, i.e. see the (elf_(sec,sym)|symbol_type)__filter() routines in the patch, just so that we consider only the same symbols as before, to reduce the possibility of regressions.

All the tests on 50-something build environments, in various versions of lots of distros and cross-build environments, were performed without build regressions. As usual with all pull requests, the other tests were also performed: 'perf test' and 'make -C tools/perf build-test'.

Also, this was done at a fine granularity so that regressions can be bisected more easily.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-hiq0fy2rsleupnqqwuojo1ne@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
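To see why the split forced double lookups at call sites, here is an illustrative sketch that is not part of the patch: the map_groups__find() prototypes follow the pre- and post-patch shapes implied by this series, but the find_map_before()/find_map_after() wrappers are hypothetical and only show the pattern being removed. It assumes perf's internal map/map_groups definitions are in scope.

/*
 * Illustrative only, not part of the patch: with split symbol tables a
 * caller resolving an arbitrary address had to probe both map types.
 * find_map_before()/find_map_after() are hypothetical wrappers.
 */
static struct map *find_map_before(struct map_groups *mg, u64 addr)
{
	struct map *map = map_groups__find(mg, MAP__FUNCTION, addr);

	if (map == NULL)	/* not a function? retry in the data/variable table */
		map = map_groups__find(mg, MAP__VARIABLE, addr);

	return map;
}

static struct map *find_map_after(struct map_groups *mg, u64 addr)
{
	/* one tree, one lookup: functions and data symbols now live together */
	return map_groups__find(mg, addr);
}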
Diffstat (limited to 'tools/perf/util/thread.c')
-rw-r--r--   tools/perf/util/thread.c   30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index ddbf0470b048..2048d393ece6 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -302,23 +302,20 @@ int thread__insert_map(struct thread *thread, struct map *map)
 static int __thread__prepare_access(struct thread *thread)
 {
 	bool initialized = false;
-	int i, err = 0;
-
-	for (i = 0; i < MAP__NR_TYPES; ++i) {
-		struct maps *maps = &thread->mg->maps[i];
-		struct map *map;
+	int err = 0;
+	struct maps *maps = &thread->mg->maps;
+	struct map *map;
 
-		down_read(&maps->lock);
+	down_read(&maps->lock);
 
-		for (map = maps__first(maps); map; map = map__next(map)) {
-			err = unwind__prepare_access(thread, map, &initialized);
-			if (err || initialized)
-				break;
-		}
-
-		up_read(&maps->lock);
+	for (map = maps__first(maps); map; map = map__next(map)) {
+		err = unwind__prepare_access(thread, map, &initialized);
+		if (err || initialized)
+			break;
 	}
 
+	up_read(&maps->lock);
+
 	return err;
 }
 
@@ -335,8 +332,6 @@ static int thread__prepare_access(struct thread *thread)
 static int thread__clone_map_groups(struct thread *thread,
 				    struct thread *parent)
 {
-	int i;
-
 	/* This is new thread, we share map groups for process. */
 	if (thread->pid_ == parent->pid_)
 		return thread__prepare_access(thread);
@@ -348,9 +343,8 @@ static int thread__clone_map_groups(struct thread *thread,
 	}
 
 	/* But this one is new process, copy maps. */
-	for (i = 0; i < MAP__NR_TYPES; ++i)
-		if (map_groups__clone(thread, parent->mg, i) < 0)
-			return -ENOMEM;
+	if (map_groups__clone(thread, parent->mg) < 0)
+		return -ENOMEM;
 
 	return 0;
 }
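The first hunk above also shows the post-patch idiom for walking a thread's single maps tree under its lock. A minimal sketch of a caller reusing that pattern, assuming perf's internal thread/map headers; the counting helper itself is hypothetical and uses only the accessors that appear in the diff (down_read()/up_read() on maps->lock, maps__first(), map__next()):

/*
 * Hypothetical helper, for illustration only: walk the unified maps of a
 * thread the same way __thread__prepare_access() does after this patch.
 */
static unsigned int thread__count_maps(struct thread *thread)
{
	struct maps *maps = &thread->mg->maps;	/* single tree, no MAP__* index */
	struct map *map;
	unsigned int nr = 0;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map))
		nr++;

	up_read(&maps->lock);

	return nr;
}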