lockdep: Assign lock keys on registration

Lockdep assigns lock keys when a lock is looked up.  This is
unnecessary: if the lock has never been registered then it is known
not to be locked.  It also complicates the calling convention.  Switch
to assigning the lock key in register_lock_class().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-2-willy@infradead.org
Matthew Wilcox 2018-01-17 07:14:12 -08:00, committed by Thomas Gleixner
Parent: 1d966eb4d6
Commit: 64f29d1bc9
1 changed file with 40 additions and 36 deletions


@@ -647,18 +647,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -671,24 +665,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +701,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
@@ -738,18 +747,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
@@ -3498,7 +3502,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	 * Clearly if the lock hasn't been acquired _ever_, we're not
 	 * holding it either, so report failure.
 	 */
-	if (IS_ERR_OR_NULL(class))
+	if (!class)
 		return 0;
 
 	/*
@@ -4294,7 +4298,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*
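
The net effect of the patch: look_up_lock_class() now returns either a
registered class or NULL (the ERR_PTR(-EINVAL) case is gone), and
register_lock_class() becomes the single place where a missing key is
assigned, via the new assign_lock_key() helper. As an illustration only
-- the names lock_map, look_up() and register_class() below, and the
list standing in for lockdep's real hash table, are invented for this
sketch and are not kernel API -- here is a self-contained userspace
model of the same pattern:

#include <stdio.h>
#include <stdlib.h>

struct lock_map {
        void *key;              /* NULL until the lock is first registered */
        const char *name;
};

struct lock_class {
        void *key;
        const char *name;
        struct lock_class *next;
};

static struct lock_class *classes;      /* stand-in for the real hash table */

/*
 * Lookup side after the patch: never assigns a key, never returns an
 * error pointer -- an unregistered lock simply yields NULL.
 */
static struct lock_class *look_up(struct lock_map *lock)
{
        struct lock_class *c;

        if (!lock->key)         /* never registered => cannot be held */
                return NULL;
        for (c = classes; c; c = c->next)
                if (c->key == lock->key)
                        return c;
        return NULL;
}

/*
 * Registration side: the single place a key is assigned, mirroring the
 * assign_lock_key() call the patch adds to register_lock_class().
 */
static struct lock_class *register_class(struct lock_map *lock)
{
        struct lock_class *c = look_up(lock);

        if (c)
                return c;
        if (!lock->key)
                lock->key = lock;       /* models assign_lock_key() */
        c = calloc(1, sizeof(*c));
        if (!c)
                return NULL;
        c->key = lock->key;
        c->name = lock->name;
        c->next = classes;
        classes = c;
        return c;
}

int main(void)
{
        struct lock_map m = { .key = NULL, .name = "demo_lock" };

        printf("lookup before registration: %s\n",
               look_up(&m) ? "found" : "NULL");
        register_class(&m);
        printf("lookup after registration:  %s\n",
               look_up(&m) ? "found" : "NULL");
        return 0;
}

The point of the split is visible in look_up(): with key assignment
moved out, the lookup path has no side effects and a single failure
mode, so callers can test a plain NULL instead of IS_ERR_OR_NULL(),
exactly as the match_held_lock() and lockdep_reset_lock() hunks above
do.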