2005-04-17 02:20:36 +04:00
|
|
|
#ifndef __LINUX_SMPLOCK_H
|
|
|
|
#define __LINUX_SMPLOCK_H
|
|
|
|
|
2005-11-14 03:06:57 +03:00
|
|
|
#ifdef CONFIG_LOCK_KERNEL
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
/* True iff the current task holds the Big Kernel Lock (lock_depth >= 0). */
#define kernel_locked() (current->lock_depth >= 0)
|
|
|
|
|
|
|
|
/*
 * Out-of-line slow paths used by the scheduler wrappers below.
 * NOTE(review): definitions are not visible in this header — presumably
 * in lib/kernel_lock.c; confirm against the build.
 */
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);
|
|
|
|
|
|
|
|
/*
 * Release/re-acquire global kernel lock for the scheduler.
 *
 * Fast path: only tasks actually holding the BKL (lock_depth >= 0)
 * pay for the out-of-line call; for everyone else this is a single
 * predicted-not-taken test.
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)
|
|
|
|
|
|
|
|
static inline int reacquire_kernel_lock(struct task_struct *task)
|
|
|
|
{
|
|
|
|
if (unlikely(task->lock_depth >= 0))
|
2008-01-25 23:08:33 +03:00
|
|
|
return __reacquire_kernel_lock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Acquire/release the Big Kernel Lock.  The __acquires/__releases
 * annotations let sparse check for balanced lock/unlock pairs.
 */
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
|
|
|
|
|
2008-05-19 00:27:41 +04:00
|
|
|
/*
 * Various legacy drivers don't really need the BKL in a specific
 * function, but they *do* need to know that the BKL became available.
 * This function just avoids wrapping a bunch of lock/unlock pairs
 * around code which doesn't really need it.
 */
static inline void cycle_kernel_lock(void)
{
	/* Block until the BKL is free, then immediately drop it again. */
	lock_kernel();
	unlock_kernel();
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#else

/*
 * !CONFIG_LOCK_KERNEL: the Big Kernel Lock does not exist, so every
 * operation compiles away to nothing.  kernel_locked() reports 1 so
 * that callers asserting "BKL is held" still succeed.
 */
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define cycle_kernel_lock()			do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1
|
|
|
|
|
|
|
|
#endif /* CONFIG_LOCK_KERNEL */
|
|
|
|
#endif /* __LINUX_SMPLOCK_H */
|