perf symbols: Protect dso cache tree using dso->lock

The dso cache is accessed during DWARF callchain unwind, and multiple
threads may touch it concurrently.  Protect it under dso->lock.

Note that the lock is not taken in dso_cache__find().  I think it's
safe to search the cache tree without the lock since we never delete
nodes from it while the dso is alive.

If a lockless lookup misses an existing node because a concurrent
insertion rotated the tree, the node will be found under the lock in
dso_cache__insert() anyway.
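
To make the locking scheme concrete, here is a minimal userspace
sketch of the same pattern: a lockless lookup paired with a locked,
duplicate-detecting insert.  This is not the perf code itself -- the
struct layout and the cache_find()/cache_insert() names are invented
for illustration, and a plain binary tree stands in for the rbtree
(whose rebalancing rotations are what can hide a node from a lockless
reader):

#include <pthread.h>
#include <stddef.h>

struct entry {
        struct entry *left, *right;     /* stand-in for rb_node */
        unsigned long long key;
};

struct cache {
        struct entry *root;
        pthread_mutex_t lock;
};

/* Lockless lookup: safe only because nodes are never freed while
 * readers run; at worst a concurrent rebalance makes us miss a node,
 * which the locked insert below catches. */
static struct entry *cache_find(struct cache *c, unsigned long long key)
{
        struct entry *e = c->root;

        while (e) {
                if (key < e->key)
                        e = e->left;
                else if (key > e->key)
                        e = e->right;
                else
                        return e;
        }
        return NULL;
}

/* Locked insert: returns NULL on success, or the node that is already
 * present if another thread won the race. */
static struct entry *cache_insert(struct cache *c, struct entry *new)
{
        struct entry **p, *old = NULL;

        pthread_mutex_lock(&c->lock);
        p = &c->root;
        while (*p) {
                if (new->key < (*p)->key)
                        p = &(*p)->left;
                else if (new->key > (*p)->key)
                        p = &(*p)->right;
                else {
                        old = *p;       /* lost the race */
                        goto out;
                }
        }
        new->left = new->right = NULL;
        *p = new;
out:
        pthread_mutex_unlock(&c->lock);
        return old;
}

A caller that loses the race frees its own copy and keeps using the
returned node, which is exactly what dso_cache__read() does in the
diff below.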

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1431909055-21442-27-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Commit: 8e67b7258e (parent: 4a936edc31)
Author: Namhyung Kim, 2015-05-18 09:30:41 +09:00
Committer: Arnaldo Carvalho de Melo
1 changed file (tools/perf/util/dso.c), 27 insertions(+), 7 deletions(-)


@@ -495,10 +495,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 }
 
 static void
-dso_cache__free(struct rb_root *root)
+dso_cache__free(struct dso *dso)
 {
+	struct rb_root *root = &dso->data.cache;
 	struct rb_node *next = rb_first(root);
 
+	pthread_mutex_lock(&dso->lock);
 	while (next) {
 		struct dso_cache *cache;
 
@@ -507,10 +509,12 @@ dso_cache__free(struct rb_root *root)
 		rb_erase(&cache->rb_node, root);
 		free(cache);
 	}
+	pthread_mutex_unlock(&dso->lock);
 }
 
-static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
+static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
 {
+	const struct rb_root *root = &dso->data.cache;
 	struct rb_node * const *p = &root->rb_node;
 	const struct rb_node *parent = NULL;
 	struct dso_cache *cache;
@@ -529,17 +533,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
 		else
 			return cache;
 	}
+
 	return NULL;
 }
 
-static void
-dso_cache__insert(struct rb_root *root, struct dso_cache *new)
+static struct dso_cache *
+dso_cache__insert(struct dso *dso, struct dso_cache *new)
 {
+	struct rb_root *root = &dso->data.cache;
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
 	struct dso_cache *cache;
 	u64 offset = new->offset;
 
+	pthread_mutex_lock(&dso->lock);
 	while (*p != NULL) {
 		u64 end;
 
@@ -551,10 +558,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new)
 			p = &(*p)->rb_left;
 		else if (offset >= end)
 			p = &(*p)->rb_right;
+		else
+			goto out;
 	}
 
 	rb_link_node(&new->rb_node, parent, p);
 	rb_insert_color(&new->rb_node, root);
+
+	cache = NULL;
+out:
+	pthread_mutex_unlock(&dso->lock);
+	return cache;
 }
 
 static ssize_t
@@ -572,6 +586,7 @@ static ssize_t
 dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 {
 	struct dso_cache *cache;
+	struct dso_cache *old;
 	ssize_t ret;
 
 	do {
@@ -591,7 +606,12 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 
 		cache->offset = cache_offset;
 		cache->size   = ret;
-		dso_cache__insert(&dso->data.cache, cache);
+		old = dso_cache__insert(dso, cache);
+		if (old) {
+			/* we lose the race */
+			free(cache);
+			cache = old;
+		}
 
 		ret = dso_cache__memcpy(cache, offset, data, size);
 
@@ -608,7 +628,7 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset,
 {
 	struct dso_cache *cache;
 
-	cache = dso_cache__find(&dso->data.cache, offset);
+	cache = dso_cache__find(dso, offset);
 	if (cache)
 		return dso_cache__memcpy(cache, offset, data, size);
 	else
@@ -964,7 +984,7 @@ void dso__delete(struct dso *dso)
 	dso__data_close(dso);
 	auxtrace_cache__free(dso->auxtrace_cache);
 
-	dso_cache__free(&dso->data.cache);
+	dso_cache__free(dso);
 	dso__free_a2l(dso);
 	zfree(&dso->symsrc_filename);
 	pthread_mutex_destroy(&dso->lock);
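
For completeness, here is a hedged sketch of the resulting read-side
flow, built on the hypothetical cache_find()/cache_insert() helpers
from the sketch above (again illustrative, not perf's actual API): a
reader that misses on the lockless fast path builds a node and retries
under the lock, so a lookup hidden by a rotation costs only a
redundant allocation.

#include <stdlib.h>

/* Hypothetical lookup-or-add: a miss on the lockless fast path is
 * harmless because cache_insert() re-checks the key under the lock. */
static struct entry *cache_lookup_or_add(struct cache *c,
					 unsigned long long key)
{
        struct entry *e, *old;

        e = cache_find(c, key);         /* lockless fast path */
        if (e)
                return e;

        e = malloc(sizeof(*e));         /* build a new node */
        if (!e)
                return NULL;
        e->key = key;

        old = cache_insert(c, e);       /* locked slow path */
        if (old) {
                /* we lose the race, as in dso_cache__read() */
                free(e);
                e = old;
        }
        return e;
}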