seqlock: Prefix internal seqcount_t-only macros with a "do_"
When the seqcount_LOCKNAME_t group of data types was introduced, two
classes of seqlock.h sequence counter macros were added:

  - An external public API which can either take a plain seqcount_t or
    any of the seqcount_LOCKNAME_t variants.

  - An internal API which takes only a plain seqcount_t.

To distinguish between the two groups, the "*_seqcount_t_*" pattern was
used for the latter. This confused a number of mm/ call-site developers,
and Linus also commented that it was not a standard practice for marking
seqlock.h internal APIs.

Distinguish the latter group of macros by prefixing a "do_".

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/CAHk-=wikhGExmprXgaW+MVXG1zsGpztBbVwOb23vetk41EtTBQ@mail.gmail.com
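For context, the rename keeps the dispatch shape intact: each public macro
extracts the embedded plain seqcount_t from whichever variant it is given
(via the seqprop_*() helpers) and forwards it to a seqcount_t-only internal
function, which now carries the "do_" prefix. Below is a minimal userspace
sketch of that pattern, not the kernel implementation: the two-variant
_Generic() dispatch and the simplified type and helper names
(__seqprop_ptr(), __seqprop_spinlock_ptr()) are illustrative stand-ins for
the full seqprop machinery in seqlock.h.

    #include <stdio.h>

    /* Simplified stand-ins: the real types also carry lockdep state,
     * KCSAN instrumentation and a pointer to the associated lock. */
    typedef struct { unsigned sequence; } seqcount_t;
    typedef struct { seqcount_t seqcount; } seqcount_spinlock_t;

    static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
    { return s; }
    static inline seqcount_t *__seqprop_spinlock_ptr(seqcount_spinlock_t *s)
    { return &s->seqcount; }

    /* Public API: accepts either counter variant. */
    #define seqprop_ptr(s)                                              \
        _Generic(*(s),                                                  \
            seqcount_t:          __seqprop_ptr((seqcount_t *)(s)),      \
            seqcount_spinlock_t: __seqprop_spinlock_ptr((seqcount_spinlock_t *)(s)))

    /* Internal API: plain seqcount_t only, hence the "do_" prefix. */
    static inline void do_raw_write_seqcount_begin(seqcount_t *s)
    {
        s->sequence++;  /* odd count => write section in progress */
    }

    #define raw_write_seqcount_begin(s)                                 \
        do_raw_write_seqcount_begin(seqprop_ptr(s))

    int main(void)
    {
        seqcount_t plain = { 0 };
        seqcount_spinlock_t locked = { { 0 } };

        raw_write_seqcount_begin(&plain);   /* both variants accepted */
        raw_write_seqcount_begin(&locked);
        printf("%u %u\n", plain.sequence, locked.seqcount.sequence);
        return 0;
    }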
Parent: cf48647243
Commit: 66bcfcdf89
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
  * Return: true if a read section retry is required, else false
  */
 #define __read_seqcount_retry(s, start)					\
-	__read_seqcount_t_retry(seqprop_ptr(s), start)
+	do___read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -445,12 +445,12 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
  * Return: true if a read section retry is required, else false
  */
 #define read_seqcount_retry(s, start)					\
-	read_seqcount_t_retry(seqprop_ptr(s), start)
+	do_read_seqcount_retry(seqprop_ptr(s), start)
 
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_t_retry(s, start);
+	return do___read_seqcount_retry(s, start);
 }
 
 /**
@@ -462,10 +462,10 @@ do {									\
 	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	raw_write_seqcount_t_begin(seqprop_ptr(s));			\
+	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -478,13 +478,13 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
  */
 #define raw_write_seqcount_end(s)					\
 do {									\
-	raw_write_seqcount_t_end(seqprop_ptr(s));			\
+	do_raw_write_seqcount_end(seqprop_ptr(s));			\
 									\
 	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
@@ -506,12 +506,12 @@ do {									\
 	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin_nested(seqprop_ptr(s), subclass);	\
+	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
 } while (0)
 
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	raw_write_seqcount_t_begin(s);
+	do_raw_write_seqcount_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
@@ -533,12 +533,12 @@ do {									\
 	if (seqprop_preemptible(s))					\
 		preempt_disable();					\
 									\
-	write_seqcount_t_begin(seqprop_ptr(s));				\
+	do_write_seqcount_begin(seqprop_ptr(s));			\
 } while (0)
 
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
 {
-	write_seqcount_t_begin_nested(s, 0);
+	do_write_seqcount_begin_nested(s, 0);
 }
 
 /**
@@ -549,16 +549,16 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
  */
 #define write_seqcount_end(s)						\
 do {									\
-	write_seqcount_t_end(seqprop_ptr(s));				\
+	do_write_seqcount_end(seqprop_ptr(s));				\
 									\
 	if (seqprop_preemptible(s))					\
 		preempt_enable();					\
 } while (0)
 
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_t_end(s);
+	do_raw_write_seqcount_end(s);
 }
 
 /**
@@ -603,9 +603,9 @@ static inline void write_seqcount_t_end(seqcount_t *s)
  * }
  */
 #define raw_write_seqcount_barrier(s)					\
-	raw_write_seqcount_t_barrier(seqprop_ptr(s))
+	do_raw_write_seqcount_barrier(seqprop_ptr(s))
 
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -623,9 +623,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
  * will complete successfully and see data older than this.
  */
 #define write_seqcount_invalidate(s)					\
-	write_seqcount_t_invalidate(seqprop_ptr(s))
+	do_write_seqcount_invalidate(seqprop_ptr(s))
 
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -865,9 +865,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 }
 
 /*
- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
- * instead of the generic write_seqcount_begin(). This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
  */
 
 /**
@@ -886,7 +886,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -898,7 +898,7 @@ static inline void write_seqlock(seqlock_t *sl)
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
 
@@ -912,7 +912,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -925,7 +925,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -939,7 +939,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -951,7 +951,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -960,7 +960,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 	return flags;
 }
 
@@ -989,7 +989,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
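As the seqlock_t hunks above show, the public write-side API is unchanged
by this rename: callers still pair write_seqlock()/write_sequnlock(), and
only the internals now route through do_write_seqcount_begin() and
do_write_seqcount_end(). For illustration, a typical call site would look
like the following sketch; the foo_lock/foo_data names are hypothetical,
not code from this patch.

    #include <linux/seqlock.h>

    /* Hypothetical shared data guarded by a seqlock_t. */
    static DEFINE_SEQLOCK(foo_lock);
    static struct { int a, b; } foo_data;

    static void foo_update(int a, int b)
    {
        write_seqlock(&foo_lock);    /* spin_lock() + do_write_seqcount_begin() */
        foo_data.a = a;
        foo_data.b = b;
        write_sequnlock(&foo_lock);  /* do_write_seqcount_end() + spin_unlock() */
    }

    static void foo_read(int *a, int *b)
    {
        unsigned seq;

        do {    /* retry the read section if a writer interleaved */
            seq = read_seqbegin(&foo_lock);
            *a = foo_data.a;
            *b = foo_data.b;
        } while (read_seqretry(&foo_lock, seq));
    }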