ring-buffer: Make it generally available
In hunting down the cause for the hwlat_detector ring buffer spew in
my failed -next builds it became obvious that folks are now treating
ring_buffer as something that is generic independent of tracing and
thus, suitable for public driver consumption.

Given that there are only a few minor areas in ring_buffer that have
any reliance on CONFIG_TRACING or CONFIG_FUNCTION_TRACER, provide
stubs for those and make it generally available.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Cc: Jon Masters <jcm@jonmasters.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20090625053012.GB19944@linux-sh.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 00e54d087a
Commit: 1155de47cd
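With the ring buffer selectable on its own, a driver can consume the API directly without pulling in the rest of the tracing machinery. The following is a minimal sketch of such driver-side usage, not part of this commit: the module name, the sample_event payload, and the single write at init time are illustrative only, and the signatures used (ring_buffer_alloc, ring_buffer_lock_reserve, ring_buffer_unlock_commit, ring_buffer_consume) reflect the API as it stood around this commit and may differ in later kernels.

/*
 * Minimal sketch of driver-side ring_buffer usage, assuming only
 * CONFIG_RING_BUFFER is enabled (no CONFIG_TRACING). Module name and
 * event payload are hypothetical, for illustration only.
 */
#include <linux/module.h>
#include <linux/ring_buffer.h>

struct sample_event {
	u64	value;
};

static struct ring_buffer *buf;

static int __init sample_rb_init(void)
{
	struct ring_buffer_event *event;
	struct sample_event *entry;

	/* One page per CPU is enough here; overwrite old data when full. */
	buf = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!buf)
		return -ENOMEM;

	/* Reserve space on the current CPU, fill the payload, commit it. */
	event = ring_buffer_lock_reserve(buf, sizeof(*entry));
	if (event) {
		entry = ring_buffer_event_data(event);
		entry->value = 42;
		ring_buffer_unlock_commit(buf, event);
	}

	return 0;
}

static void __exit sample_rb_exit(void)
{
	struct ring_buffer_event *event;
	struct sample_event *entry;
	u64 ts;
	int cpu;

	/* Drain whatever was written before tearing the buffer down. */
	for_each_possible_cpu(cpu) {
		while ((event = ring_buffer_consume(buf, cpu, &ts))) {
			entry = ring_buffer_event_data(event);
			pr_info("sample_rb: cpu %d value %llu\n",
				cpu, (unsigned long long)entry->value);
		}
	}

	ring_buffer_free(buf);
}

module_init(sample_rb_init);
module_exit(sample_rb_exit);
MODULE_LICENSE("GPL");

A real consumer would size the buffer for its workload and drain the per-CPU data from a reader thread or a debugfs interface rather than at module exit.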
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
 obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }
 
+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
 
 extern struct pid *ftrace_pid_trace;
 
+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
 
 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit