author     Peter Zijlstra <peterz@infradead.org>          2015-01-29 14:44:34 +0100
committer  Ingo Molnar <mingo@kernel.org>                 2015-02-04 08:07:12 +0100
commit     a83fe28e2e45392464858a96745db26ac73670c8 (patch)
tree       f5512a81ef46a14319e1600079b36e9cea11ee90 /kernel
parent     8f95b435b62522aed3381aaea920de8d09ccabf3 (diff)
perf: Fix put_event() ctx lock
What I suspect, though I'm in zombie mode today it seems, is this: I initially thought it was impossible for ctx to change once the refcount dropped to 0, but I now suspect it is possible.

Note that until perf_remove_from_context() runs, the event is still active and visible on the lists, so a concurrent sys_perf_event_open() from another task into this task can race.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Stephane Eranian <eranian@gmail.com>
Cc: mark.rutland@arm.com
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20150129134434.GB26304@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
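The fix makes put_event() take the context mutex through a lock-and-revalidate helper instead of trusting a ctx pointer read before the lock. As a rough user-space illustration of that pattern (the names below are made up for the sketch, RCU is replaced by a plain atomic pointer, and freeing is ignored; this is not the kernel code), built with -pthread:

/*
 * Illustrative sketch only: lock the object's current context, then
 * re-check the pointer under the lock and retry if it moved.
 */
#include <pthread.h>
#include <stdatomic.h>

struct ctx {
	pthread_mutex_t mutex;
	atomic_int      refcount;
};

struct event {
	struct ctx *_Atomic ctx;        /* may be re-pointed concurrently */
};

static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->refcount, 1); }
static void ctx_put(struct ctx *c) { atomic_fetch_sub(&c->refcount, 1); }

/*
 * Return the event's current context with its mutex held.  If the
 * context was swapped out between the load and the lock, drop
 * everything and try again; never trust a ctx pointer sampled before
 * the mutex was taken.
 */
static struct ctx *event_ctx_lock(struct event *e)
{
	struct ctx *c;
again:
	c = atomic_load(&e->ctx);
	ctx_get(c);                             /* pin it across the lock attempt */
	pthread_mutex_lock(&c->mutex);
	if (atomic_load(&e->ctx) != c) {        /* lost a race; retry */
		pthread_mutex_unlock(&c->mutex);
		ctx_put(c);
		goto again;
	}
	return c;
}

int main(void)
{
	static struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER, .refcount = 1 };
	struct event e = { .ctx = &c };
	struct ctx *locked = event_ctx_lock(&e);

	/* ... work on 'e' knowing its context cannot change ... */
	pthread_mutex_unlock(&locked->mutex);
	ctx_put(locked);
	return 0;
}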
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  17
1 file changed, 12 insertions, 5 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 142dbabc1615..f773fa13d7c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -947,7 +947,8 @@ static void put_ctx(struct perf_event_context *ctx)
  *	  perf_event::mmap_mutex
  *	  mmap_sem
  */
-static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+static struct perf_event_context *
+perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
 {
 	struct perf_event_context *ctx;
 
@@ -960,7 +961,7 @@ again:
 	}
 	rcu_read_unlock();
 
-	mutex_lock(&ctx->mutex);
+	mutex_lock_nested(&ctx->mutex, nesting);
 	if (event->ctx != ctx) {
 		mutex_unlock(&ctx->mutex);
 		put_ctx(ctx);
@@ -970,6 +971,12 @@ again:
 	return ctx;
 }
 
+static inline struct perf_event_context *
+perf_event_ctx_lock(struct perf_event *event)
+{
+	return perf_event_ctx_lock_nested(event, 0);
+}
+
 static void perf_event_ctx_unlock(struct perf_event *event,
 				  struct perf_event_context *ctx)
 {
@@ -3572,7 +3579,7 @@ static void perf_remove_from_owner(struct perf_event *event)
  */
 static void put_event(struct perf_event *event)
 {
-	struct perf_event_context *ctx = event->ctx;
+	struct perf_event_context *ctx;
 
 	if (!atomic_long_dec_and_test(&event->refcount))
 		return;
@@ -3580,7 +3587,6 @@ static void put_event(struct perf_event *event)
 	if (!is_kernel_event(event))
 		perf_remove_from_owner(event);
 
-	WARN_ON_ONCE(ctx->parent_ctx);
 	/*
 	 * There are two ways this annotation is useful:
 	 *
@@ -3593,7 +3599,8 @@ static void put_event(struct perf_event *event)
 	 * the last filedesc died, so there is no possibility
 	 * to trigger the AB-BA case.
 	 */
-	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
+	WARN_ON_ONCE(ctx->parent_ctx);
 	perf_remove_from_context(event, true);
 	mutex_unlock(&ctx->mutex);
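Read together, the last three hunks leave the locking sequence in put_event() looking roughly like this (reconstructed only from the hunks above, with the original lockdep-annotation comment elided; the remainder of the function is outside this excerpt):

static void put_event(struct perf_event *event)
{
	struct perf_event_context *ctx;

	if (!atomic_long_dec_and_test(&event->refcount))
		return;

	if (!is_kernel_event(event))
		perf_remove_from_owner(event);

	/*
	 * Take event->ctx's mutex through the new helper, which
	 * re-reads and revalidates event->ctx under the lock instead
	 * of trusting a pointer sampled before the refcount hit zero.
	 */
	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
	WARN_ON_ONCE(ctx->parent_ctx);
	perf_remove_from_context(event, true);
	mutex_unlock(&ctx->mutex);

	/* ... rest of put_event() not shown in this excerpt ... */
}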