locking/lockdep: Rename lockdep_assert_held_exclusive() -> lockdep_assert_held_write()
All callers of lockdep_assert_held_exclusive() use it to verify the correct locking state of either a semaphore (ldisc_sem in tty, mmap_sem for perf events, i_rwsem of inode for dax) or a rwlock used by apparmor. Thus it makes sense to rename _exclusive to _write, since that is the semantics the callers care about. Additionally, there is already a lockdep_assert_held_read(), which this new naming is more consistent with.

No functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190531100651.3969-1-nborisov@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: ba54f0c3f7
Commit: 9ffbe8ac05
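For orientation, here is a minimal sketch of how the renamed assertion pairs with the existing read-side one on a reader/writer semaphore. This is hypothetical illustration code, not part of the patch; the lock and function names are invented.

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* hypothetical lock */
static int example_val;			/* protected by example_sem */

/* Callers must hold example_sem for write (exclusive). */
static void example_set(int v)
{
	lockdep_assert_held_write(&example_sem);
	example_val = v;
}

/* Callers must hold example_sem for read (shared). */
static int example_get(void)
{
	lockdep_assert_held_read(&example_sem);
	return example_val;
}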

arch/x86/events/core.c

@@ -2179,7 +2179,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&mm->mmap_sem);
+	lockdep_assert_held_write(&mm->mmap_sem);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);

drivers/infiniband/core/device.c

@@ -457,7 +457,7 @@ static int alloc_name(struct ib_device *ibdev, const char *name)
 	int rc;
 	int i;
 
-	lockdep_assert_held_exclusive(&devices_rwsem);
+	lockdep_assert_held_write(&devices_rwsem);
 	ida_init(&inuse);
 	xa_for_each (&devices, index, device) {
 		char buf[IB_DEVICE_NAME_MAX];

drivers/tty/tty_ldisc.c

@@ -487,7 +487,7 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
 
 static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 {
-	lockdep_assert_held_exclusive(&tty->ldisc_sem);
+	lockdep_assert_held_write(&tty->ldisc_sem);
 	WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
 	clear_bit(TTY_LDISC_OPEN, &tty->flags);
 	if (ld->ops->close)

@@ -509,7 +509,7 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
 	struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
 	int r;
 
-	lockdep_assert_held_exclusive(&tty->ldisc_sem);
+	lockdep_assert_held_write(&tty->ldisc_sem);
 	if (IS_ERR(disc))
 		return PTR_ERR(disc);
 	tty->ldisc = disc;

@@ -633,7 +633,7 @@ EXPORT_SYMBOL_GPL(tty_set_ldisc);
  */
 static void tty_ldisc_kill(struct tty_struct *tty)
 {
-	lockdep_assert_held_exclusive(&tty->ldisc_sem);
+	lockdep_assert_held_write(&tty->ldisc_sem);
 	if (!tty->ldisc)
 		return;
 	/*

@@ -681,7 +681,7 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	struct tty_ldisc *ld;
 	int retval;
 
-	lockdep_assert_held_exclusive(&tty->ldisc_sem);
+	lockdep_assert_held_write(&tty->ldisc_sem);
 	ld = tty_ldisc_get(tty, disc);
 	if (IS_ERR(ld)) {
 		BUG_ON(disc == N_TTY);

fs/dax.c

@@ -1188,7 +1188,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 	unsigned flags = 0;
 
 	if (iov_iter_rw(iter) == WRITE) {
-		lockdep_assert_held_exclusive(&inode->i_rwsem);
+		lockdep_assert_held_write(&inode->i_rwsem);
 		flags |= IOMAP_WRITE;
 	} else {
 		lockdep_assert_held(&inode->i_rwsem);

include/linux/lockdep.h

@@ -394,7 +394,7 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 		WARN_ON(debug_locks && !lockdep_is_held(l));		\
 	} while (0)
 
-#define lockdep_assert_held_exclusive(l)	do {			\
+#define lockdep_assert_held_write(l)	do {				\
 		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
 	} while (0)
 
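For context, the read-side counterpart the commit message mentions is defined right next to these macros; a sketch of that neighbouring definition, quoted for reference and unchanged by this patch:

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

The second argument of lockdep_is_held_type() selects the hold type being asserted: 0 means held for write (exclusive), 1 means held for read (shared). That is why the renamed write assertion above passes 0.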

@@ -479,7 +479,7 @@ struct lockdep_map { };
 #define lockdep_is_held_type(l, r)		(1)
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
-#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 

security/apparmor/label.c

@@ -76,7 +76,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
 
 	AA_BUG(!orig);
 	AA_BUG(!new);
-	lockdep_assert_held_exclusive(&labels_set(orig)->lock);
+	lockdep_assert_held_write(&labels_set(orig)->lock);
 
 	tmp = rcu_dereference_protected(orig->proxy->label,
 					&labels_ns(orig)->lock);

@@ -566,7 +566,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
 
 	AA_BUG(!ls);
 	AA_BUG(!label);
-	lockdep_assert_held_exclusive(&ls->lock);
+	lockdep_assert_held_write(&ls->lock);
 
 	if (new)
 		__aa_proxy_redirect(label, new);

@@ -603,7 +603,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
 	AA_BUG(!ls);
 	AA_BUG(!old);
 	AA_BUG(!new);
-	lockdep_assert_held_exclusive(&ls->lock);
+	lockdep_assert_held_write(&ls->lock);
 	AA_BUG(new->flags & FLAG_IN_TREE);
 
 	if (!label_is_stale(old))

@@ -640,7 +640,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
 	AA_BUG(!ls);
 	AA_BUG(!label);
 	AA_BUG(labels_set(label) != ls);
-	lockdep_assert_held_exclusive(&ls->lock);
+	lockdep_assert_held_write(&ls->lock);
 	AA_BUG(label->flags & FLAG_IN_TREE);
 
 	/* Figure out where to put new node */
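The apparmor assertions above sit behind a rwlock_t rather than a rw_semaphore; lockdep tracks the hold type for both, so the same write-side assertion applies. A minimal sketch of the write-side calling pattern (illustrative only; the real callers live elsewhere in label.c):

	unsigned long flags;

	/* Writers take the label-set lock exclusively... */
	write_lock_irqsave(&ls->lock, flags);
	/* ...so lockdep_assert_held_write(&ls->lock) succeeds here. */
	write_unlock_irqrestore(&ls->lock, flags);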