#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nested interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK: 0x000000ff
 *         SOFTIRQ_MASK: 0x0000ff00
 *         HARDIRQ_MASK: 0x000f0000
 *             NMI_MASK: 0x00100000
 * PREEMPT_NEED_RESCHED: 0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
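
/*
 * Worked example (illustrative, derived from the offsets above): a hard
 * interrupt arriving while a softirq runs with preemption disabled gives
 *
 *	preempt_count() == HARDIRQ_OFFSET	(0x00010000)
 *			 + SOFTIRQ_OFFSET	(0x00000100)
 *			 + PREEMPT_OFFSET	(0x00000001)
 *			 == 0x00010101
 *
 * Each nesting level adds its offset on entry and removes it on exit,
 * so the per-context counts can be recovered with the *_MASK values.
 */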

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()       - We're in (hard) IRQ context
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI, IRQ, or softirq context, or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()       - We're in NMI context
 * in_task()      - We're in task context
 *
 * Note: due to the BH-disabled ambiguity, in_softirq() and in_interrupt()
 *       really should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
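
/*
 * Usage sketch (illustrative; my_buf_alloc() is hypothetical, the GFP
 * flags are the standard kernel ones): code that may be called from
 * both task and interrupt context can branch on these macros to pick a
 * non-sleeping allocation when sleeping is forbidden.
 *
 *	static void *my_buf_alloc(size_t size)
 *	{
 *		if (!in_task())
 *			return kmalloc(size, GFP_ATOMIC);
 *		return kmalloc(size, GFP_KERNEL);
 *	}
 */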

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
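
/*
 * Worked example (illustrative, CONFIG_PREEMPT_COUNT=y): spin_lock_bh()
 * raises preempt_count by SOFTIRQ_LOCK_OFFSET,
 *
 *	SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET = 0x200 + 0x1 = 0x201,
 *
 * and the unlock sequence removes it in two steps: spin_unlock() drops
 * the preemption count (0x1) and local_bh_enable() drops the softirq
 * disable count (0x200), restoring the original value.
 */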

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
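
/*
 * Example reading (illustrative): by the time the scheduler core checks
 * this, it has itself disabled preemption exactly once, so a "clean"
 * entry shows preempt_count() == PREEMPT_DISABLE_OFFSET; any other
 * value means schedule() was reached from atomic context.
 */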

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
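
/*
 * Usage sketch (illustrative; my_percpu_state is a hypothetical per-CPU
 * variable): disabling preemption pins the task to its current CPU, so
 * a multi-step per-CPU update cannot be torn by migration:
 *
 *	preempt_disable();
 *	__this_cpu_write(my_percpu_state.a, a);
 *	__this_cpu_write(my_percpu_state.b, b);
 *	preempt_enable();
 */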

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling to migrate into our
 * preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)
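
/*
 * Illustrative hazard (hypothetical code): were these stubs fully empty,
 * the compiler would see no ordering constraint and could move a
 * faulting access such as get_user(v, uptr) from outside a
 * preempt_disable()/preempt_enable() pair to inside it; the resulting
 * page fault could sleep within the intended critical region. barrier()
 * forbids that code motion even when no preempt_count is maintained.
 */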

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
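
/*
 * Usage sketch (illustrative; the my_* symbols are hypothetical): a
 * subsystem that wants callbacks when the current task is switched out
 * and back in, e.g. to save and restore per-task hardware state:
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *	static struct preempt_notifier my_notifier;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_preempt_ops);
 *	preempt_notifier_register(&my_notifier);
 *
 * Registration attaches the notifier to the calling task; sched_out
 * then fires when that task is preempted and sched_in when it is
 * scheduled back.
 */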
#endif

#endif /* __LINUX_PREEMPT_H */