perf unwind: Call unwind__prepare_access for forked thread
Currently we call unwind__prepare_access() only when processing a map event. When we report a fork event, the new thread inherits its parent's maps and unwind__prepare_access() is never called for it. As a result, unwind__get_entries() sees uninitialized unwind_libunwind_ops and returns no callchain.

Add unwind__prepare_access() calls to fork event processing.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1467634583-29147-5-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 6c50258443
parent a2873325ff
3 changed files with 44 additions and 4 deletions
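The message above pins the bug on unwind__get_entries() seeing uninitialized unwind_libunwind_ops. As a rough, self-contained illustration of that failure mode (all names below are invented for the example and are not the actual perf API), a dispatch pointer that is never prepared leaves the caller with an empty callchain:

#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for the per-thread unwind state; only the shape of
 * the problem mirrors perf, not the real types. */
struct unwind_ops {
        int (*get_entries)(void);
};

struct demo_thread {
        struct unwind_ops *unwind_ops;  /* stays NULL if never prepared */
};

/* Mirrors the symptom: with no prepared ops there is nothing to dispatch
 * to, so the caller simply gets zero callchain entries. */
static int demo_get_entries(struct demo_thread *thread)
{
        if (thread->unwind_ops == NULL)
                return 0;
        return thread->unwind_ops->get_entries();
}

int main(void)
{
        struct demo_thread forked = { .unwind_ops = NULL };

        printf("entries: %d\n", demo_get_entries(&forked)); /* prints 0 */
        return 0;
}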
@@ -15,6 +15,7 @@
 #include "debug.h"
 #include "machine.h"
 #include <linux/string.h>
+#include "unwind.h"
 
 static void __maps__insert(struct maps *maps, struct map *map);
 
@@ -744,9 +745,10 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 /*
  * XXX This should not really _copy_ te maps, but refcount them.
  */
-int map_groups__clone(struct map_groups *mg,
+int map_groups__clone(struct thread *thread,
                       struct map_groups *parent, enum map_type type)
 {
+        struct map_groups *mg = thread->mg;
         int err = -ENOMEM;
         struct map *map;
         struct maps *maps = &parent->maps[type];
@@ -757,6 +759,11 @@ int map_groups__clone(struct map_groups *mg,
                 struct map *new = map__clone(map);
                 if (new == NULL)
                         goto out_unlock;
+
+                err = unwind__prepare_access(thread, new, NULL);
+                if (err)
+                        goto out_unlock;
+
                 map_groups__insert(mg, new);
                 map__put(new);
         }
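With the two hunks above, map_groups__clone() now receives the destination thread rather than a bare struct map_groups pointer, so each cloned map can be run through unwind__prepare_access() before it is inserted. The remaining hunks update the header declaration and the fork-path caller to match.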
@@ -194,7 +194,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                          struct map **mapp, symbol_filter_t filter);
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct map_groups *mg,
+int map_groups__clone(struct thread *thread,
                       struct map_groups *parent, enum map_type type);
 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
 
@@ -212,6 +212,39 @@ int thread__insert_map(struct thread *thread, struct map *map)
         return 0;
 }
 
+static int __thread__prepare_access(struct thread *thread)
+{
+        bool initialized = false;
+        int i, err = 0;
+
+        for (i = 0; i < MAP__NR_TYPES; ++i) {
+                struct maps *maps = &thread->mg->maps[i];
+                struct map *map;
+
+                pthread_rwlock_rdlock(&maps->lock);
+
+                for (map = maps__first(maps); map; map = map__next(map)) {
+                        err = unwind__prepare_access(thread, map, &initialized);
+                        if (err || initialized)
+                                break;
+                }
+
+                pthread_rwlock_unlock(&maps->lock);
+        }
+
+        return err;
+}
+
+static int thread__prepare_access(struct thread *thread)
+{
+        int err = 0;
+
+        if (symbol_conf.use_callchain)
+                err = __thread__prepare_access(thread);
+
+        return err;
+}
+
 static int thread__clone_map_groups(struct thread *thread,
                                     struct thread *parent)
 {
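The new __thread__prepare_access() walks each read-locked maps list and stops at the first error, or as soon as unwind__prepare_access() reports the thread's unwind state as initialized; the thread__prepare_access() wrapper skips the walk entirely unless callchains are in use. Below is a minimal standalone sketch of that stop-at-first-success pattern; the item/list types and prepare_one() are invented for the example, and only the control flow (read-locked walk, break on error or on successful initialization) mirrors the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct item {
        struct item *next;
        int id;
};

struct item_list {
        pthread_rwlock_t lock;
        struct item *head;
};

/* Stand-in for unwind__prepare_access(): pretend only the item with id 2
 * can supply the per-thread state, and report success via the flag. */
static int prepare_one(struct item *it, bool *initialized)
{
        if (it->id == 2)
                *initialized = true;
        return 0;
}

/* Mirrors __thread__prepare_access(): walk the read-locked list once and
 * stop at the first error or the first item that initialized the state. */
static int prepare_all(struct item_list *list)
{
        bool initialized = false;
        struct item *it;
        int err = 0;

        pthread_rwlock_rdlock(&list->lock);
        for (it = list->head; it; it = it->next) {
                err = prepare_one(it, &initialized);
                if (err || initialized)
                        break;
        }
        pthread_rwlock_unlock(&list->lock);

        return err;
}

int main(void)
{
        struct item c = { NULL, 3 };
        struct item b = { &c, 2 };
        struct item a = { &b, 1 };
        struct item_list list = { PTHREAD_RWLOCK_INITIALIZER, &a };

        printf("prepare_all() = %d\n", prepare_all(&list));
        return 0;
}

Stopping after the first successful map is enough here because the state being prepared (signalled through the initialized out-parameter) belongs to the thread rather than to an individual map, and gating on symbol_conf.use_callchain keeps the extra work off the fork path when no callchains were requested.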
@@ -219,7 +252,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
         /* This is new thread, we share map groups for process. */
         if (thread->pid_ == parent->pid_)
-                return 0;
+                return thread__prepare_access(thread);
 
         if (thread->mg == parent->mg) {
                 pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
@@ -229,7 +262,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
         /* But this one is new process, copy maps. */
         for (i = 0; i < MAP__NR_TYPES; ++i)
-                if (map_groups__clone(thread->mg, parent->mg, i) < 0)
+                if (map_groups__clone(thread, parent->mg, i) < 0)
                         return -ENOMEM;
 
         return 0;