2005-04-17 02:20:36 +04:00
|
|
|
#ifndef __LINUX_PREEMPT_H
|
|
|
|
#define __LINUX_PREEMPT_H
|
|
|
|
|
|
|
|
/*
|
|
|
|
* include/linux/preempt.h - macros for accessing and manipulating
|
|
|
|
* preempt_count (used for kernel preemption, interrupt count, etc.)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
2007-07-26 15:40:43 +04:00
|
|
|
#include <linux/list.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-08-14 16:55:31 +04:00
|
|
|
/*
 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
|
|
|
|
|
2013-08-14 16:55:40 +04:00
|
|
|
#include <asm/preempt.h>
|
2013-08-14 16:55:31 +04:00
|
|
|
|
2008-05-12 23:20:42 +04:00
|
|
|
/*
 * Debug / tracer builds route preempt_count updates through out-of-line
 * functions (kernel/sched/core.c) so they can be checked and traced;
 * otherwise fall straight through to the arch-supplied inline primitives
 * from <asm/preempt.h>.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/* Drop the count by one and report whether a reschedule is now due. */
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
|
|
|
|
|
2013-09-10 14:15:23 +04:00
|
|
|
/* Single-step helpers; the __ variants always bypass debug accounting. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
|
2011-06-08 03:13:27 +04:00
|
|
|
|
|
|
|
#ifdef CONFIG_PREEMPT_COUNT

/*
 * Disable preemption: bump the count first, then barrier() so the
 * compiler cannot hoist protected accesses above the increment.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

/*
 * Re-enable without checking for a pending reschedule; barrier() before
 * the decrement keeps protected accesses inside the critical region.
 * Reserved for the scheduler itself -- modules #undef the alias below.
 */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
/* Re-enable and reschedule immediately if the count hit zero with a
 * resched pending. */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
/* !CONFIG_PREEMPT: never voluntarily schedule from here. */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
|
2008-05-12 23:20:41 +04:00
|
|
|
|
|
|
|
/*
 * _notrace variants: identical semantics but use the raw __ primitives so
 * the function tracer can disable preemption without recursing into the
 * traced preempt_count_add/sub paths.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif
|
2008-05-12 23:20:41 +04:00
|
|
|
|
2011-06-08 03:13:27 +04:00
|
|
|
#else /* !CONFIG_PREEMPT_COUNT */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
spinlocks and preemption points need to be at least compiler barriers
In UP and non-preempt respectively, the spinlocks and preemption
disable/enable points are stubbed out entirely, because there is no
regular code that can ever hit the kind of concurrency they are meant to
protect against.
However, while there is no regular code that can cause scheduling, we
_do_ end up having some exceptional (literally!) code that can do so,
and that we need to make sure does not ever get moved into the critical
region by the compiler.
In particular, get_user() and put_user() is generally implemented as
inline asm statements (even if the inline asm may then make a call
instruction to call out-of-line), and can obviously cause a page fault
and IO as a result. If that inline asm has been scheduled into the
middle of a preemption-safe (or spinlock-protected) code region, we
obviously lose.
Now, admittedly this is *very* unlikely to actually ever happen, and
we've not seen examples of actual bugs related to this. But partly
exactly because it's so hard to trigger and the resulting bug is so
subtle, we should be extra careful to get this right.
So make sure that even when preemption is disabled, and we don't have to
generate any actual *code* to explicitly tell the system that we are in
a preemption-disabled region, we need to at least tell the compiler not
to move things around the critical region.
This patch grew out of the same discussion that caused commits
79e5f05edcbf ("ARC: Add implicit compiler barrier to raw_local_irq*
functions") and 3e2e0d2c222b ("tile: comment assumption about
__insn_mtspr for <asm/irqflags.h>") to come about.
Note for stable: use discretion when/if applying this. As mentioned,
this bug may never have actually bitten anybody, and gcc may never have
done the required code motion for it to possibly ever trigger in
practice.
Cc: stable@vger.kernel.org
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-04-09 21:48:33 +04:00
|
|
|
/*
|
|
|
|
* Even if we don't have any preemption, we need preempt disable/enable
|
|
|
|
* to be barriers, so that we don't have things like get_user/put_user
|
|
|
|
* that can cause faults and scheduling migrate into our preempt-protected
|
|
|
|
* region.
|
|
|
|
*/
|
2013-09-10 14:15:23 +04:00
|
|
|
#define preempt_disable() barrier()
|
spinlocks and preemption points need to be at least compiler barriers
In UP and non-preempt respectively, the spinlocks and preemption
disable/enable points are stubbed out entirely, because there is no
regular code that can ever hit the kind of concurrency they are meant to
protect against.
However, while there is no regular code that can cause scheduling, we
_do_ end up having some exceptional (literally!) code that can do so,
and that we need to make sure does not ever get moved into the critical
region by the compiler.
In particular, get_user() and put_user() is generally implemented as
inline asm statements (even if the inline asm may then make a call
instruction to call out-of-line), and can obviously cause a page fault
and IO as a result. If that inline asm has been scheduled into the
middle of a preemption-safe (or spinlock-protected) code region, we
obviously lose.
Now, admittedly this is *very* unlikely to actually ever happen, and
we've not seen examples of actual bugs related to this. But partly
exactly because it's so hard to trigger and the resulting bug is so
subtle, we should be extra careful to get this right.
So make sure that even when preemption is disabled, and we don't have to
generate any actual *code* to explicitly tell the system that we are in
a preemption-disabled region, we need to at least tell the compiler not
to move things around the critical region.
This patch grew out of the same discussion that caused commits
79e5f05edcbf ("ARC: Add implicit compiler barrier to raw_local_irq*
functions") and 3e2e0d2c222b ("tile: comment assumption about
__insn_mtspr for <asm/irqflags.h>") to come about.
Note for stable: use discretion when/if applying this. As mentioned,
this bug may never have actually bitten anybody, and gcc may never have
done the required code motion for it to possibly ever trigger in
practice.
Cc: stable@vger.kernel.org
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-04-09 21:48:33 +04:00
|
|
|
#define sched_preempt_enable_no_resched() barrier()
|
2013-09-10 14:15:23 +04:00
|
|
|
#define preempt_enable_no_resched() barrier()
|
|
|
|
#define preempt_enable() barrier()
|
|
|
|
#define preempt_check_resched() do { } while (0)
|
spinlocks and preemption points need to be at least compiler barriers
In UP and non-preempt respectively, the spinlocks and preemption
disable/enable points are stubbed out entirely, because there is no
regular code that can ever hit the kind of concurrency they are meant to
protect against.
However, while there is no regular code that can cause scheduling, we
_do_ end up having some exceptional (literally!) code that can do so,
and that we need to make sure does not ever get moved into the critical
region by the compiler.
In particular, get_user() and put_user() is generally implemented as
inline asm statements (even if the inline asm may then make a call
instruction to call out-of-line), and can obviously cause a page fault
and IO as a result. If that inline asm has been scheduled into the
middle of a preemption-safe (or spinlock-protected) code region, we
obviously lose.
Now, admittedly this is *very* unlikely to actually ever happen, and
we've not seen examples of actual bugs related to this. But partly
exactly because it's so hard to trigger and the resulting bug is so
subtle, we should be extra careful to get this right.
So make sure that even when preemption is disabled, and we don't have to
generate any actual *code* to explicitly tell the system that we are in
a preemption-disabled region, we need to at least tell the compiler not
to move things around the critical region.
This patch grew out of the same discussion that caused commits
79e5f05edcbf ("ARC: Add implicit compiler barrier to raw_local_irq*
functions") and 3e2e0d2c222b ("tile: comment assumption about
__insn_mtspr for <asm/irqflags.h>") to come about.
Note for stable: use discretion when/if applying this. As mentioned,
this bug may never have actually bitten anybody, and gcc may never have
done the required code motion for it to possibly ever trigger in
practice.
Cc: stable@vger.kernel.org
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-04-09 21:48:33 +04:00
|
|
|
|
|
|
|
#define preempt_disable_notrace() barrier()
|
|
|
|
#define preempt_enable_no_resched_notrace() barrier()
|
|
|
|
#define preempt_enable_notrace() barrier()
|
2008-05-12 23:20:41 +04:00
|
|
|
|
2011-06-08 03:13:27 +04:00
|
|
|
#endif /* CONFIG_PREEMPT_COUNT */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-11-20 19:52:19 +04:00
|
|
|
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
|
|
|
|
|
2013-11-20 15:22:37 +04:00
|
|
|
/* Set the per-CPU "resched needed" state unconditionally. */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

/* Fold TIF_NEED_RESCHED into the preempt_count copy if it is set. */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
|
|
|
|
|
2007-07-26 15:40:43 +04:00
|
|
|
#ifdef CONFIG_PREEMPT_NOTIFIERS
|
|
|
|
|
|
|
|
struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* preempt_notifier - key for installing preemption notifiers
|
|
|
|
* @link: internal use
|
|
|
|
* @ops: defines the notifier functions to be called
|
|
|
|
*
|
|
|
|
* Usually used in conjunction with container_of().
|
|
|
|
*/
|
|
|
|
struct preempt_notifier {
|
|
|
|
struct hlist_node link;
|
|
|
|
struct preempt_ops *ops;
|
|
|
|
};
|
|
|
|
|
|
|
|
void preempt_notifier_register(struct preempt_notifier *notifier);
|
|
|
|
void preempt_notifier_unregister(struct preempt_notifier *notifier);
|
|
|
|
|
|
|
|
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
|
|
|
|
struct preempt_ops *ops)
|
|
|
|
{
|
|
|
|
INIT_HLIST_NODE(¬ifier->link);
|
|
|
|
notifier->ops = ops;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif /* __LINUX_PREEMPT_H */
|