20c8928abe
A module will add/remove its trace events when it gets loaded/unloaded,
so the ftrace_events list is not "const", and concurrent access needs to
be protected. This patch thus fixes races between loading/unloading
modules and reading 'available_events' or reading/writing 'set_event',
etc.

The race can be reproduced as follows:

 # for ((; ;)) { cat /mnt/tracing/available_events; } > /dev/null &
 # for ((; ;)) { insmod trace-events-sample.ko; rmmod sample; } &

After a while:

 BUG: unable to handle kernel paging request at 0010011c
 IP: [<c1080f27>] t_next+0x1b/0x2d
 ...
 Call Trace:
  [<c10c90e6>] ? seq_read+0x217/0x30d
  [<c10c8ecf>] ? seq_read+0x0/0x30d
  [<c10b4c19>] ? vfs_read+0x8f/0x136
  [<c10b4fc3>] ? sys_read+0x40/0x65
  [<c1002a68>] ? sysenter_do_call+0x12/0x36

[ Impact: fix races when concurrently accessing ftrace_events list ]

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4A00F709.3080800@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
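The oops above fires in t_next(), the seq_file iterator behind
'available_events'. The fix is to hold event_mutex for the whole list
traversal of one read. A minimal sketch of that locking pattern,
assuming the t_start()/t_stop() callback names used by the event
seq_file in kernel/trace/trace_events.c (the exact hunk is not shown
here):

static void *t_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Take event_mutex in ->start() so a module unload cannot
	 * free list entries between the t_next() calls of a single
	 * read(); the lock is dropped in ->stop() below, which the
	 * seq_file core guarantees to call after ->start().
	 */
	mutex_lock(&event_mutex);
	return t_next(m, NULL, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}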
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include "trace.h"

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	/*
	 * event_mutex keeps a module from removing its events (and
	 * freeing the list entries) while we walk ftrace_events.
	 */
	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ret = event->profile_enable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			event->profile_disable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
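For reference, a hypothetical caller sketch (profile_event() and its
surrounding logic are assumptions for illustration, not part of this
commit): a profiler enabling an event by id must tolerate -EINVAL,
since the module that owns the event may have been unloaded between
obtaining the id and this call.

/* Hypothetical usage sketch; profile_event() is not a kernel symbol. */
static int profile_event(int event_id)
{
	int err = ftrace_profile_enable(event_id);

	if (err)
		return err;	/* event not found, e.g. module unloaded */

	/* ... counter runs, samples are recorded ... */

	ftrace_profile_disable(event_id);
	return 0;
}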