path: root/kernel/trace/trace_events.c
author     Li Zefan <lizf@cn.fujitsu.com>    2009-05-06 10:33:45 +0800
committer  Ingo Molnar <mingo@elte.hu>       2009-05-06 10:38:19 +0200
commit     20c8928abe70e204bd077ab6cfe23002d7788983 (patch)
tree       e161656f99c814ebdd69df8b5a79dab58f80065e /kernel/trace/trace_events.c
parent     2df75e415709ad12862028916c772c1f377f6a7c (diff)
tracing/events: fix concurrent access to ftrace_events list
A module will add/remove its trace events when it gets loaded/unloaded, so the
ftrace_events list is not "const", and concurrent access needs to be protected.

This patch thus fixes races between loading/unloading modules and reading
'available_events' or reading/writing 'set_event', etc.

Below shows how to reproduce the race:

 # for ((; ;)) { cat /mnt/tracing/available_events; } > /dev/null &
 # for ((; ;)) { insmod trace-events-sample.ko; rmmod sample; } &

After a while:

BUG: unable to handle kernel paging request at 0010011c
IP: [<c1080f27>] t_next+0x1b/0x2d
...
Call Trace:
 [<c10c90e6>] ? seq_read+0x217/0x30d
 [<c10c8ecf>] ? seq_read+0x0/0x30d
 [<c10b4c19>] ? vfs_read+0x8f/0x136
 [<c10b4fc3>] ? sys_read+0x40/0x65
 [<c1002a68>] ? sysenter_do_call+0x12/0x36

[ Impact: fix races when concurrently accessing the ftrace_events list ]

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4A00F709.3080800@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
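The fix follows the usual seq_file locking pattern: take the mutex in the ->start()
callback, hold it for the whole traversal, and drop it in ->stop(), so writers (module
load/unload paths) cannot modify the list while a reader is walking it. Below is a
minimal user-space analogue of that pattern, not kernel code; the names struct event,
add_event, reader_start and reader_stop are invented for illustration (build with
gcc -pthread):

/*
 * Minimal user-space sketch of the locking pattern used by the patch:
 * readers hold the same mutex that writers take while mutating the list,
 * and hold it across the whole traversal, mirroring t_start()/t_stop().
 * All names here are illustrative, not taken from the kernel sources.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event {
	char name[32];
	struct event *next;
};

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct event *events;		/* shared, mutable list head */

/* Writer side: what a module's load path does. */
static void add_event(const char *name)
{
	struct event *e = malloc(sizeof(*e));

	if (!e)
		return;
	strncpy(e->name, name, sizeof(e->name) - 1);
	e->name[sizeof(e->name) - 1] = '\0';

	pthread_mutex_lock(&event_mutex);
	e->next = events;
	events = e;
	pthread_mutex_unlock(&event_mutex);
}

/* Reader side, analogue of t_start(): lock first, then pick the cursor. */
static struct event *reader_start(void)
{
	pthread_mutex_lock(&event_mutex);
	return events;
}

/* Analogue of t_stop(): traversal done, writers may run again. */
static void reader_stop(void)
{
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	add_event("sample_event_a");
	add_event("sample_event_b");

	for (struct event *e = reader_start(); e; e = e->next)
		printf("%s\n", e->name);
	reader_stop();

	return 0;
}

The point mirrored from the patch is that the cursor is only picked up after the lock
is held, and the lock is not dropped until the traversal has finished.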
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--   kernel/trace/trace_events.c   20
1 file changed, 11 insertions, 9 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f251a150e75e..8d579ff23610 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -21,7 +21,7 @@
#define TRACE_SYSTEM "TRACE_SYSTEM"
-static DEFINE_MUTEX(event_mutex);
+DEFINE_MUTEX(event_mutex);
LIST_HEAD(ftrace_events);
@@ -80,6 +80,7 @@ static void ftrace_clear_events(void)
{
struct ftrace_event_call *call;
+ mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) {
if (call->enabled) {
@@ -87,6 +88,7 @@ static void ftrace_clear_events(void)
call->unregfunc();
}
}
+ mutex_unlock(&event_mutex);
}
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
@@ -274,6 +276,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ mutex_lock(&event_mutex);
+ if (*pos == 0)
+ m->private = ftrace_events.next;
return t_next(m, NULL, pos);
}
@@ -303,6 +308,9 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
+ mutex_lock(&event_mutex);
+ if (*pos == 0)
+ m->private = ftrace_events.next;
return s_next(m, NULL, pos);
}
@@ -319,12 +327,12 @@ static int t_show(struct seq_file *m, void *v)
static void t_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
- int ret;
const struct seq_operations *seq_ops;
if ((file->f_mode & FMODE_WRITE) &&
@@ -332,13 +340,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
ftrace_clear_events();
seq_ops = inode->i_private;
- ret = seq_open(file, seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
-
- m->private = ftrace_events.next;
- }
- return ret;
+ return seq_open(file, seq_ops);
}
static ssize_t
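
The last hunk is the other half of the change: ftrace_event_seq_open() no longer caches
ftrace_events.next into m->private at open time, because the list can change (and that
very entry can be freed by a module unload) between open() and the first read(). Pieced
together from the hunks above, the iterator entry points after the patch read roughly:

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

with s_start()/t_stop() for 'set_event' handled the same way. The cursor is thus
(re)initialised only while event_mutex is held, and seq_read()'s start/stop bracketing
guarantees the lock is dropped again after every batch of output.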