module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING
Andrew worried about the overhead on small systems; only use the fancy
code when either perf or tracing is enabled.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent: 93c2e105f6
Commit: 6c9692e2d6
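In outline, the patch makes the tree-based lookup a compile-time feature: when CONFIG_MODULES_TREE_LOOKUP is not set, the tree maintenance hooks become empty stubs and the lookup reverts to the old linear list walk, so call sites stay unconditional. Below is a minimal standalone sketch of that idiom; CONFIG_FANCY_LOOKUP, struct entry and the function names are invented for illustration and are not part of the patch.

/* Compile-time fallback idiom: the fast path and its maintenance hooks exist
 * only when the config symbol is defined; otherwise the hooks are empty stubs
 * and the lookup degrades to a linear scan.  All names here are made up. */
#include <stddef.h>
#include <stdio.h>

struct entry { int key; struct entry *next; };

static struct entry *entries;		/* plain linked list, always maintained */

#ifdef CONFIG_FANCY_LOOKUP
static struct entry *index_slot[16];	/* toy index, standing in for the RB-tree */

static void index_insert(struct entry *e) { index_slot[e->key & 15] = e; }
static void index_remove(struct entry *e) { index_slot[e->key & 15] = NULL; }

static struct entry *find(int key)
{
	struct entry *e = index_slot[key & 15];
	return (e && e->key == key) ? e : NULL;
}
#else
/* compiled-out flavour: stubs plus a small, slow fallback */
static void index_insert(struct entry *e) { (void)e; }
static void index_remove(struct entry *e) { (void)e; }

static struct entry *find(int key)
{
	for (struct entry *e = entries; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}
#endif

int main(void)
{
	static struct entry a = { .key = 7 };

	a.next = entries;
	entries = &a;
	index_insert(&a);
	printf("found %d: %s\n", a.key, find(7) ? "yes" : "no");
	index_remove(&a);
	return 0;
}

Either way the caller in main() is identical; only the build configuration decides which implementation it gets, which is how __module_address() below keeps working whether or not the tree is compiled in.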
include/linux/module.h
@@ -282,7 +282,7 @@ struct module {
 	 *
 	 * Cacheline align here, such that:
 	 *   module_init, module_core, init_size, core_size,
-	 *   init_text_size, core_text_size and ltn_core.node[0]
+	 *   init_text_size, core_text_size and mtn_core::{mod,node[0]}
 	 *   are on the same cacheline.
 	 */
 	void *module_init	____cacheline_aligned;
@@ -296,6 +296,7 @@ struct module {
 	/* The size of the executable code in each section. */
 	unsigned int init_text_size, core_text_size;
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
 	/*
 	 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
 	 * above entries such that a regular lookup will only touch one
@@ -303,6 +304,7 @@ struct module {
 	 */
 	struct mod_tree_node	mtn_core;
 	struct mod_tree_node	mtn_init;
+#endif
 
 	/* Size of RO sections of the module (text+rodata) */
 	unsigned int init_ro_size, core_ro_size;
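The comment in the first hunk pins down which fields must share a cacheline once the tree node moves into struct module. A hedged, standalone illustration of that constraint follows; struct fake_module is a mock with placeholder fields rather than the kernel's struct module, and 64 bytes is an assumed L1 cache line size.

/* Mock layout check for the "same cacheline" requirement described above.
 * Not kernel code: fake_module only imitates the field order, and CACHELINE
 * is an assumption. */
#include <stddef.h>

#define CACHELINE 64

struct fake_tree_node {
	struct fake_module *mod;	/* back-pointer, read on every lookup */
	unsigned long node[3];		/* stand-in for the latch tree node */
};

struct fake_module {
	void *module_init __attribute__((aligned(CACHELINE)));
	void *module_core;
	unsigned int init_size, core_size;
	unsigned int init_text_size, core_text_size;
	struct fake_tree_node mtn_core;	/* hot: must stay on the aligned line */
	struct fake_tree_node mtn_init;	/* cold: allowed to spill past it */
};

/* The property the comment asks for: module_init .. mtn_core.node[0]
 * all land within one cacheline of the aligned start. */
_Static_assert(offsetof(struct fake_module, mtn_core) +
	       offsetof(struct fake_tree_node, node) +
	       sizeof(unsigned long) <= CACHELINE,
	       "hot lookup fields must fit in one cacheline");

int main(void) { return 0; }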
init/Kconfig
@@ -1989,6 +1989,10 @@ endchoice
 
 endif # MODULES
 
+config MODULES_TREE_LOOKUP
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
 config INIT_ALL_POSSIBLE
 	bool
 	help
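For readers less used to Kconfig: def_bool y with a depends line creates a non-interactive symbol that evaluates to y exactly when the dependency holds, and the build then emits the matching CONFIG_ macro into the generated config header. The standalone snippet below imitates that effect by defining the macro by hand, purely so it compiles on its own; in a real build the define comes from Kconfig, not from source.

/* Hand-rolled imitation of what the Kconfig fragment produces: when
 * PERF_EVENTS or TRACING is enabled, CONFIG_MODULES_TREE_LOOKUP is defined
 * and the #ifdef blocks in this patch take the tree path. */
#include <stdio.h>

#define CONFIG_MODULES_TREE_LOOKUP 1	/* as if PERF_EVENTS or TRACING were set */

int main(void)
{
#ifdef CONFIG_MODULES_TREE_LOOKUP
	puts("module address lookups use the latched RB-tree");
#else
	puts("module address lookups fall back to the linear list walk");
#endif
	return 0;
}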
kernel/module.c
@@ -102,6 +102,8 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
 /*
  * Use a latched RB-tree for __module_address(); this allows us to use
  * RCU-sched lookups of the address from any context.
@@ -112,6 +114,10 @@ static LIST_HEAD(modules);
  *
  * Because init ranges are short lived we mark them unlikely and have placed
  * them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
  */
 
 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
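The new comment explains why the tree is worth having at all: perf and tracing symbolize whole callchains, and a symbolizer does one __module_address() lookup per captured frame, possibly from NMI context. The toy below only models that access pattern; lookup_module() is a stub standing in for __module_address(), and the frame values are invented.

/* Illustrative model of callchain symbolization: one module lookup per frame,
 * repeated for every sample.  Nothing here is kernel API. */
#include <stdio.h>

static const char *lookup_module(unsigned long addr)
{
	/* stub for __module_address(): pretend high addresses are module text */
	return addr >= 0x1000 ? "example_mod" : NULL;
}

static void resolve_callchain(const unsigned long *frames, int nr)
{
	for (int i = 0; i < nr; i++) {
		const char *mod = lookup_module(frames[i]);

		printf("frame %d: %#lx (%s)\n", i, frames[i],
		       mod ? mod : "core kernel");
	}
}

int main(void)
{
	unsigned long frames[] = { 0x2040, 0x1100, 0x0800 };

	resolve_callchain(frames, 3);	/* one lookup per frame, per sample */
	return 0;
}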
@@ -192,7 +198,7 @@ static void mod_tree_remove(struct module *mod)
 	mod_tree_remove_init(mod);
 }
 
-static struct module *mod_tree_find(unsigned long addr)
+static struct module *mod_find(unsigned long addr)
 {
 	struct latch_tree_node *ltn;
 
@@ -203,6 +209,26 @@ static struct module *mod_tree_find(unsigned long addr)
 	return container_of(ltn, struct mod_tree_node, node)->mod;
 }
 
+#else /* MODULES_TREE_LOOKUP */
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
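The #else branch above restores the historical behaviour: without MODULES_TREE_LOOKUP, mod_find() walks the RCU module list linearly, while the tree variant keeps lookups logarithmic in the number of modules. The standalone toy below only contrasts those two shapes; it substitutes a sorted array plus bsearch() for the kernel's latched RB-tree, and the module names and address ranges are invented.

/* Linear walk vs ordered lookup over address ranges.  Toy data and toy
 * structures; the kernel uses a latch-protected RB-tree, not bsearch. */
#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long base, size; const char *name; };

static struct range mods[] = {		/* sorted by base address */
	{ 0x1000, 0x400, "mod_a" },
	{ 0x2000, 0x800, "mod_b" },
	{ 0x4000, 0x200, "mod_c" },
};

/* fallback-style lookup: test every module until one contains addr */
static const char *find_linear(unsigned long addr)
{
	for (size_t i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
		if (addr - mods[i].base < mods[i].size)
			return mods[i].name;
	return NULL;
}

/* tree-style lookup: ordered search over the sorted ranges */
static int cmp_addr(const void *key, const void *elem)
{
	unsigned long addr = *(const unsigned long *)key;
	const struct range *r = elem;

	if (addr < r->base)
		return -1;
	if (addr - r->base >= r->size)
		return 1;
	return 0;
}

static const char *find_ordered(unsigned long addr)
{
	struct range *r = bsearch(&addr, mods, sizeof(mods) / sizeof(mods[0]),
				  sizeof(mods[0]), cmp_addr);
	return r ? r->name : NULL;
}

int main(void)
{
	unsigned long addr = 0x2100;

	printf("linear:  %s\n", find_linear(addr));
	printf("ordered: %s\n", find_ordered(addr));
	return 0;
}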
@@ -3966,7 +3992,7 @@ struct module *__module_address(unsigned long addr)
 
 	module_assert_mutex_or_preempt();
 
-	mod = mod_tree_find(addr);
+	mod = mod_find(addr);
 	if (mod) {
 		BUG_ON(!within_module(addr, mod));
 		if (mod->state == MODULE_STATE_UNFORMED)