#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 */

struct optimistic_spin_node {
|
|
|
|
struct optimistic_spin_node *next, *prev;
|
|
|
|
int locked; /* 1 if lock acquired */
|
|
|
|
int cpu; /* encoded CPU # + 1 value */
|
|
|
|
};
struct optimistic_spin_queue {
|
|
|
|
/*
|
|
|
|
* Stores an encoded value of the CPU # of the tail node in the queue.
|
|
|
|
* If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
|
|
|
|
*/
|
|
|
|
atomic_t tail;
|
|
|
|
};

#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
|
|
|
|
{
|
|
|
|
atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
|
|
|
|
}

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
|
|
|
|
{
|
|
|
|
return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
|
|
|
|
}

#endif /* __LINUX_OSQ_LOCK_H */