locking/lockdep: Handle statically initialized PER_CPU locks properly
If a PER_CPU struct which contains a spin_lock is statically initialized
via:

  DEFINE_PER_CPU(struct foo, bla) = {
	.lock = __SPIN_LOCK_UNLOCKED(bla.lock)
  };

then lockdep assigns a separate key to each lock, because the logic for
assigning a key to statically initialized locks is to use the address as
the key. With per CPU locks the address is obviously different on each
CPU, so each CPU's copy ends up in its own lock class.

That's wrong, because all the per CPU copies should share the same key.

To solve this the following modifications are required:

 1) Extend the is_kernel_percpu_address()/is_module_percpu_address()
    helpers to hand back the canonical address of the per CPU address,
    i.e. the per CPU address minus the per CPU offset.

 2) Check the lock address with these functions and if the per CPU check
    matches use the returned canonical address as the lock key, so all
    per CPU locks have the same key.

 3) Move the static_obj(key) check into look_up_lock_class() so this
    check can be avoided for statically initialized per CPU locks.
    That's required because the canonical address fails the
    static_obj(key) check for obvious reasons.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Merged Dan's fixups for !MODULES and !SMP into this patch. ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Murphy <dmurphy@ti.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20170227143736.pectaimkjkan5kow@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 6419c4af77
Commit: 383776fa75
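To make the failure mode concrete: every CPU gets its own copy of bla at
a different virtual address, so keying lock classes by address creates
one class per CPU instead of one class total. The sketch below is
hypothetical test code, not part of the patch; it assumes an SMP kernel
where CPUs 0 and 1 are possible, and uses the __is_kernel_percpu_address()
helper introduced by this change to show that all copies collapse to one
canonical address:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
};

/* One copy of 'bla' per CPU, each at a different virtual address. */
static DEFINE_PER_CPU(struct foo, bla) = {
	.lock = __SPIN_LOCK_UNLOCKED(bla.lock),
};

static void __init canonical_key_check(void)
{
	unsigned long addr0 = (unsigned long)&per_cpu(bla, 0).lock;
	unsigned long addr1 = (unsigned long)&per_cpu(bla, 1).lock;
	unsigned long can0, can1;

	/* The raw addresses differ, so they are useless as a shared key. */
	WARN_ON(addr0 == addr1);

	/*
	 * Removing the per CPU offset yields the same canonical address
	 * for every copy, which is exactly what lockdep now uses as key.
	 */
	if (__is_kernel_percpu_address(addr0, &can0) &&
	    __is_kernel_percpu_address(addr1, &can1))
		WARN_ON(can0 != can1);
}

With canonical keys in place, all CPUs' copies of bla.lock map to a
single lock class, which is what the hunks below implement.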
include/linux/module.h

@@ -493,6 +493,7 @@ static inline int module_is_live(struct module *mod)
 struct module *__module_text_address(unsigned long addr);
 struct module *__module_address(unsigned long addr);
 bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
 bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);

@@ -660,6 +661,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
 	return false;
 }
 
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+	return false;
+}
+
 static inline bool is_module_text_address(unsigned long addr)
 {
 	return false;
include/linux/percpu.h

@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #endif
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
 #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
kernel/locking/lockdep.c

@@ -660,6 +660,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
+	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -673,10 +674,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 
 	/*
 	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself:
-	 */
-	if (unlikely(!lock->key))
-		lock->key = (void *)lock;
+	 * is the lock object itself. If the lock is in the per cpu area,
+	 * the canonical address of the lock (per cpu offset removed) is
+	 * used.
+	 */
+	if (unlikely(!lock->key)) {
+		unsigned long can_addr, addr = (unsigned long)lock;
+
+		if (__is_kernel_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (__is_module_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (static_obj(lock))
+			lock->key = (void *)lock;
+		else
+			return ERR_PTR(-EINVAL);
+		is_static = true;
+	}
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -708,7 +722,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return NULL;
+	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
 /*
@@ -726,19 +740,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(class))
+	if (likely(!IS_ERR_OR_NULL(class)))
 		goto out_set_class_cache;
 
 	/*
 	 * Debug-check: all keys must be persistent!
 	 */
-	if (!static_obj(lock->key)) {
+	if (IS_ERR(class)) {
 		debug_locks_off();
 		printk("INFO: trying to register non-static key.\n");
 		printk("the code is fine but needs lockdep annotation.\n");
 		printk("turning off the locking correctness validator.\n");
 		dump_stack();
-
 		return NULL;
 	}
 
@@ -3419,7 +3432,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (!class)
+		if (IS_ERR_OR_NULL(class))
 			return 0;
 
 		/*
@@ -4225,7 +4238,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (class)
+		if (!IS_ERR_OR_NULL(class))
 			zap_class(class);
 	}
 	/*
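look_up_lock_class() now has three distinct outcomes, which is why its
callers switch from plain NULL checks to IS_ERR_OR_NULL()/IS_ERR(): a
valid class pointer, NULL for "no class registered yet", and
ERR_PTR(-EINVAL) for "the key is neither static nor a per CPU canonical
address". A minimal sketch of this standard <linux/err.h> convention
(illustrative code, not from the patch):

#include <linux/err.h>
#include <linux/types.h>

static int demo_obj;

/* Illustrative stand-in for the three-way result of look_up_lock_class(). */
static void *demo_lookup(bool registered, bool bad_key)
{
	if (bad_key)
		return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */
	return registered ? &demo_obj : NULL;	/* found, or not yet registered */
}

static int demo_caller(bool registered, bool bad_key)
{
	void *class = demo_lookup(registered, bad_key);

	if (IS_ERR_OR_NULL(class))	/* bail out on both NULL and ERR_PTR */
		return IS_ERR(class) ? PTR_ERR(class) : 0;

	/* only here is 'class' a usable pointer */
	return 0;
}

register_lock_class() uses IS_ERR() alone to tell the "non-static key"
error apart from the ordinary "not registered yet" case.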
kernel/module.c

@@ -665,16 +665,7 @@ static void percpu_modcopy(struct module *mod,
 		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 }
 
-/**
- * is_module_percpu_address - test whether address is from module static percpu
- * @addr: address to test
- *
- * Test whether @addr belongs to module static percpu area.
- *
- * RETURNS:
- * %true if @addr is from module static percpu area
- */
-bool is_module_percpu_address(unsigned long addr)
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 	struct module *mod;
 	unsigned int cpu;
@@ -688,9 +679,11 @@ bool is_module_percpu_address(unsigned long addr)
 			continue;
 		for_each_possible_cpu(cpu) {
 			void *start = per_cpu_ptr(mod->percpu, cpu);
+			void *va = (void *)addr;
 
-			if ((void *)addr >= start &&
-			    (void *)addr < start + mod->percpu_size) {
+			if (va >= start && va < start + mod->percpu_size) {
+				if (can_addr)
+					*can_addr = (unsigned long) (va - start);
 				preempt_enable();
 				return true;
 			}
@@ -701,6 +694,20 @@ bool is_module_percpu_address(unsigned long addr)
 	return false;
 }
 
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
+{
+	return __is_module_percpu_address(addr, NULL);
+}
+
 #else /* ... !CONFIG_SMP */
 
 static inline void __percpu *mod_percpu(struct module *mod)
@@ -732,6 +739,11 @@ bool is_module_percpu_address(unsigned long addr)
 	return false;
 }
 
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+	return false;
+}
+
 #endif /* CONFIG_SMP */
 
 #define MODINFO_ATTR(field)	\
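Note the refactoring pattern, which mm/percpu.c below repeats for the
kernel's own static per CPU area: the existing boolean predicate becomes
a thin wrapper that passes NULL for can_addr, so existing callers are
unaffected. A hypothetical caller sketch (demo_percpu_key() and
demo_is_percpu() are illustrative, not kernel API):

#include <linux/module.h>
#include <linux/types.h>

/* Hypothetical: derive a stable key the way lockdep now does. */
static void *demo_percpu_key(unsigned long addr)
{
	unsigned long can_addr;

	if (__is_module_percpu_address(addr, &can_addr))
		return (void *)can_addr;	/* canonical address as key */
	return (void *)addr;			/* not module per CPU data */
}

/* Callers that only need a yes/no answer use the NULL-passing wrapper. */
static bool demo_is_percpu(unsigned long addr)
{
	return is_module_percpu_address(addr);
}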
mm/percpu.c

@@ -1281,6 +1281,28 @@ void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+#ifdef CONFIG_SMP
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
+	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		void *start = per_cpu_ptr(base, cpu);
+		void *va = (void *)addr;
+
+		if (va >= start && va < start + static_size) {
+			if (can_addr)
+				*can_addr = (unsigned long) (va - start);
+			return true;
+		}
+	}
+#endif
+	/* on UP, can't distinguish from other static vars, always false */
+	return false;
+}
+
 /**
  * is_kernel_percpu_address - test whether address is from static percpu area
  * @addr: address to test
@@ -1294,20 +1316,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
  */
 bool is_kernel_percpu_address(unsigned long addr)
 {
-#ifdef CONFIG_SMP
-	const size_t static_size = __per_cpu_end - __per_cpu_start;
-	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		void *start = per_cpu_ptr(base, cpu);
-
-		if ((void *)addr >= start && (void *)addr < start + static_size)
-			return true;
-	}
-#endif
-	/* on UP, can't distinguish from other static vars, always false */
-	return false;
+	return __is_kernel_percpu_address(addr, NULL);
 }
 
 /**