Merge branch 'tip/perf/core-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Commit 6113e45f83
@@ -3730,7 +3730,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
 
@@ -3742,9 +3742,9 @@ asmlinkage void __sched preempt_schedule(void)
 		return;
 
 	do {
-		add_preempt_count(PREEMPT_ACTIVE);
+		add_preempt_count_notrace(PREEMPT_ACTIVE);
 		schedule();
-		sub_preempt_count(PREEMPT_ACTIVE);
+		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
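These two hunks are the enabling change for the rest of the merge: preempt_schedule() and the preempt-count helpers it calls are marked notrace, so a tracer that re-enables preemption with the _notrace API can trigger a reschedule without the scheduler entry point itself being traced and recursing back into the tracer. For readers unfamiliar with the annotation, here is a minimal user-space sketch of the same idea, using GCC's -finstrument-functions hooks as a stand-in for the kernel's function-tracing hooks (the file name and helper functions are invented; __cyg_profile_func_enter/exit are GCC's standard instrumentation hooks):

    /* Sketch only: user-space analogue of a notrace-style annotation.
     * Build with:  gcc -finstrument-functions notrace_demo.c -o notrace_demo
     */
    #include <stdio.h>

    #define notrace __attribute__((no_instrument_function))

    /* The profiling hooks must themselves be notrace; otherwise the compiler
     * would instrument them too and every hook call would recurse -- the same
     * hazard this patch removes from preempt_schedule(). */
    notrace void __cyg_profile_func_enter(void *fn, void *site)
    {
        fprintf(stderr, "enter %p\n", fn);
    }

    notrace void __cyg_profile_func_exit(void *fn, void *site)
    {
        fprintf(stderr, "exit  %p\n", fn);
    }

    static void traced_helper(void)
    {
        puts("traced");
    }

    static notrace void untraced_helper(void)
    {
        puts("not traced");
    }

    int main(void)
    {
        traced_helper();    /* enter/exit hooks fire around this call */
        untraced_helper();  /* no hooks: instrumentation was suppressed */
        return 0;
    }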
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	struct hlist_head *hhd;
 	struct hlist_node *n;
 	unsigned long key;
-	int resched;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
 
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	 * period. This syncs the hash iteration and freeing of items
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	hlist_for_each_entry_rcu(entry, n, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
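The comment retained in this hunk carries the real constraint: the probe hash is walked with nothing more than preemption disabled, so the preempt-off region itself serves as the read-side critical section, and whoever removes entries must wait for a scheduling-based grace period before freeing them. A hedged, kernel-style pseudocode sketch of that pairing follows; it is not the actual ftrace update path, and walker()/remove_entries() are invented names:

    /* Illustrative pseudocode only -- not the real ftrace code paths. */

    static void walker(void)             /* e.g. a traced-function callback  */
    {
        preempt_disable_notrace();       /* read side: an RCU-sched section  */
        /* ... hlist_for_each_entry_rcu() over the probe hash ... */
        preempt_enable_notrace();
    }

    static void remove_entries(void)     /* hypothetical teardown side       */
    {
        /* 1. unlink entries from the hash (normal RCU publish rules)        */
        /* 2. wait until every CPU has left its preempt-disabled region, so
         *    no walker can still hold a pointer to an unlinked entry        */
        synchronize_sched();
        /* 3. the unlinked entries may now be freed                          */
    }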
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (!event)
 		goto out;
 
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return 0;
 }
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
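The three ring-buffer hunks above delete one piece of bookkeeping: ring_buffer_lock_reserve() stored the need_resched state returned by ftrace_preempt_disable() in the per-cpu rb_need_resched variable, but only at the outermost preemption level (preempt_count() == 1), and the matching commit or discard restored it when unwinding back to that level. Plain preempt_disable_notrace()/preempt_enable_notrace() nesting now handles this implicitly. A small, self-contained C model of the removed "save at the first level, act when leaving it" pattern, with invented names and no kernel APIs:

    #include <stdio.h>
    #include <stdbool.h>

    static int  nest_count;   /* stands in for the per-CPU preempt count     */
    static bool saved_flag;   /* stands in for per_cpu(rb_need_resched, cpu) */

    static bool need_action(void)         /* stands in for need_resched()    */
    {
        return true;
    }

    static void enter(void)   /* ~ the old ring_buffer_lock_reserve() side   */
    {
        nest_count++;
        if (nest_count == 1)              /* only the first level saves      */
            saved_flag = need_action();
    }

    static void leave(void)   /* ~ the old ring_buffer_unlock_commit() side  */
    {
        if (nest_count == 1 && saved_flag)
            printf("outermost level: honour the saved flag\n");
        nest_count--;
    }

    int main(void)
    {
        enter();              /* outer reserve records the flag              */
        enter();              /* a nested reserve must not clobber it        */
        leave();              /* nested commit: nothing special              */
        leave();              /* outer commit consumes the saved flag        */
        return 0;
    }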
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return ret;
 }
@@ -1404,7 +1404,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1414,7 +1413,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1452,7 +1451,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
@@ -628,54 +628,6 @@ enum trace_iterator_flags {
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-	int resched;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
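This hunk (in the tracing header, presumably kernel/trace/trace.h given the surrounding declarations) removes the helpers that every other file in the merge stops using. Their now-deleted comment describes the problem they worked around: a tracer calling plain preempt_enable() could schedule, and the schedule would re-enter the tracer, recursing without bound; so the helpers sampled need_resched() before disabling preemption and, if it was set, re-enabled preemption without the resched check. With preempt_schedule() and the preempt-count accounting marked notrace in the first two hunks, that detour is no longer needed, and every caller converts along these lines (a summary of the pattern used throughout this diff, not a new API):

    /* Before: scheduler-safe wrappers, now removed */
    int resched;

    resched = ftrace_preempt_disable();
    /* ... record trace data ... */
    ftrace_preempt_enable(resched);

    /* After: the ordinary notrace preemption API is used directly */
    preempt_disable_notrace();
    /* ... record trace data ... */
    preempt_enable_notrace();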
@@ -32,16 +32,15 @@
 u64 notrace trace_clock_local(void)
 {
 	u64 clock;
-	int resched;
 
 	/*
 	 * sched_clock() is an architecture implemented, fast, scalable,
 	 * lockless clock. It is not guaranteed to be coherent across
 	 * CPUs, nor across CPU idle events.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	clock = sched_clock();
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return clock;
 }
@@ -1524,12 +1524,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	struct ftrace_entry *entry;
 	unsigned long flags;
 	long disabled;
-	int resched;
 	int cpu;
 	int pc;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
@@ -1551,7 +1550,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __initdata =
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu, resched;
+	int cpu;
 	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 		trace_function(tr, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static void
@@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int resched;
 	int cpu;
 	int pc;
 
@@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
@@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
  out_enable:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -110,12 +110,12 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	int cpu, resched;
+	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =