2010-09-17 19:09:00 +04:00
|
|
|
#ifndef _LINUX_JUMP_LABEL_H
|
|
|
|
#define _LINUX_JUMP_LABEL_H
|
|
|
|
|
2012-01-26 16:32:15 +04:00
|
|
|
/*
|
|
|
|
* Jump label support
|
|
|
|
*
|
|
|
|
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
|
2015-11-16 13:08:45 +03:00
|
|
|
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
|
2012-01-26 16:32:15 +04:00
|
|
|
*
|
2015-07-30 06:59:48 +03:00
|
|
|
* DEPRECATED API:
|
|
|
|
*
|
|
|
|
* The use of 'struct static_key' directly, is now DEPRECATED. In addition
|
|
|
|
* static_key_{true,false}() is also DEPRECATED. IE DO NOT use the following:
|
|
|
|
*
|
|
|
|
* struct static_key false = STATIC_KEY_INIT_FALSE;
|
|
|
|
* struct static_key true = STATIC_KEY_INIT_TRUE;
|
|
|
|
* static_key_true()
|
|
|
|
* static_key_false()
|
|
|
|
*
|
|
|
|
* The updated API replacements are:
|
|
|
|
*
|
|
|
|
* DEFINE_STATIC_KEY_TRUE(key);
|
|
|
|
* DEFINE_STATIC_KEY_FALSE(key);
|
2016-09-05 20:25:47 +03:00
|
|
|
* DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
|
|
|
|
* DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
|
2015-09-15 02:11:05 +03:00
|
|
|
* static_branch_likely()
|
|
|
|
* static_branch_unlikely()
|
2015-07-30 06:59:48 +03:00
|
|
|
*
|
2012-01-26 16:32:15 +04:00
|
|
|
* Jump labels provide an interface to generate dynamic branches using
|
2015-07-30 06:59:48 +03:00
|
|
|
* self-modifying code. Assuming toolchain and architecture support, if we
|
|
|
|
* define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
|
|
|
|
* an "if (static_branch_unlikely(&key))" statement is an unconditional branch
|
|
|
|
* (which defaults to false - and the true block is placed out of line).
|
|
|
|
* Similarly, we can define an initially true key via
|
|
|
|
* "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
|
|
|
|
* "if (static_branch_unlikely(&key))", in which case we will generate an
|
|
|
|
* unconditional branch to the out-of-line true branch. Keys that are
|
|
|
|
 * initially true or false can be used in both static_branch_unlikely()
|
|
|
|
* and static_branch_likely() statements.
|
2012-01-26 16:32:15 +04:00
|
|
|
*
|
2015-07-30 06:59:48 +03:00
|
|
|
* At runtime we can change the branch target by setting the key
|
|
|
|
* to true via a call to static_branch_enable(), or false using
|
|
|
|
* static_branch_disable(). If the direction of the branch is switched by
|
|
|
|
* these calls then we run-time modify the branch target via a
|
|
|
|
* no-op -> jump or jump -> no-op conversion. For example, for an
|
|
|
|
* initially false key that is used in an "if (static_branch_unlikely(&key))"
|
|
|
|
* statement, setting the key to true requires us to patch in a jump
|
|
|
|
 * to the out-of-line true branch.
|
2012-01-26 16:32:15 +04:00
|
|
|
*
|
2015-09-15 02:11:05 +03:00
|
|
|
* In addition to static_branch_{enable,disable}, we can also reference count
|
2015-07-30 06:59:48 +03:00
|
|
|
* the key or branch direction via static_branch_{inc,dec}. Thus,
|
|
|
|
* static_branch_inc() can be thought of as a 'make more true' and
|
2015-09-15 02:11:05 +03:00
|
|
|
* static_branch_dec() as a 'make more false'.
|
2015-07-30 06:59:48 +03:00
|
|
|
*
|
|
|
|
* Since this relies on modifying code, the branch modifying functions
|
2012-01-26 16:32:15 +04:00
|
|
|
* must be considered absolute slow paths (machine wide synchronization etc.).
|
2014-08-10 10:53:39 +04:00
|
|
|
* OTOH, since the affected branches are unconditional, their runtime overhead
|
2012-01-26 16:32:15 +04:00
|
|
|
* will be absolutely minimal, esp. in the default (off) case where the total
|
|
|
|
* effect is a single NOP of appropriate size. The on case will patch in a jump
|
|
|
|
* to the out-of-line block.
|
|
|
|
*
|
2014-08-10 10:53:39 +04:00
|
|
|
* When the control is directly exposed to userspace, it is prudent to delay the
|
2012-01-26 16:32:15 +04:00
|
|
|
* decrement to avoid high frequency code modifications which can (and do)
|
2012-02-24 11:31:31 +04:00
|
|
|
* cause significant performance degradation. Struct static_key_deferred and
|
|
|
|
* static_key_slow_dec_deferred() provide for this.
|
2012-01-26 16:32:15 +04:00
|
|
|
*
|
2015-07-30 06:59:48 +03:00
|
|
|
* Lacking toolchain and or architecture support, static keys fall back to a
|
|
|
|
* simple conditional branch.
|
2012-02-24 11:31:31 +04:00
|
|
|
*
|
2015-07-30 06:59:48 +03:00
|
|
|
* Additional babbling in: Documentation/static-keys.txt
|
2014-08-10 10:53:39 +04:00
|
|
|
*/
|
2012-01-26 16:32:15 +04:00
|
|
|
|
2015-04-09 06:51:31 +03:00
|
|
|
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
|
|
|
|
# define HAVE_JUMP_LABEL
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
2011-03-17 00:29:47 +03:00
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/compiler.h>
|
2013-10-19 23:48:53 +04:00
|
|
|
|
|
|
|
extern bool static_key_initialized;
|
|
|
|
|
|
|
|
#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
|
|
|
|
"%s used before call to jump_label_init", \
|
|
|
|
__func__)
|
2011-03-17 00:29:47 +03:00
|
|
|
|
2015-04-09 06:51:31 +03:00
|
|
|
#ifdef HAVE_JUMP_LABEL
|
2011-03-17 00:29:47 +03:00
|
|
|
|
2012-02-24 11:31:31 +04:00
|
|
|
struct static_key {
|
2011-03-17 00:29:47 +03:00
|
|
|
atomic_t enabled;
|
jump_label: Reduce the size of struct static_key
The static_key->next field goes mostly unused. The field is used for
associating module uses with a static key. Most uses of struct static_key
define a static key in the core kernel and make use of it entirely within
the core kernel, or define the static key in a module and make use of it
only from within that module. In fact, of the ~3,000 static keys defined,
I found only about 5 or so that did not fit this pattern.
Thus, we can remove the static_key->next field entirely and overload
the static_key->entries field. That is, when all the static_key uses
are contained within the same module, static_key->entries continues
to point to those uses. However, if the static_key uses are not contained
within the module where the static_key is defined, then we allocate a
struct static_key_mod, store a pointer to the uses within that
struct static_key_mod, and have the static key point at the static_key_mod.
This does incur some extra memory usage when a static_key is used in a
module that does not define it, but since there are only a handful of such
cases there is a net savings.
In order to identify if the static_key->entries pointer contains a
struct static_key_mod or a struct jump_entry pointer, bit 1 of
static_key->entries is set to 1 if it points to a struct static_key_mod and
is 0 if it points to a struct jump_entry. We were already using bit 0 in a
similar way to store the initial value of the static_key. This does mean
that allocations of struct static_key_mod and that the struct jump_entry
tables need to be at least 4-byte aligned in memory. As far as I can tell
all arches meet this criteria.
For my .config, the patch increased the text by 778 bytes, but reduced
the data + bss size by 14912, for a net savings of 14,134 bytes.
text data bss dec hex filename
8092427 5016512 790528 13899467 d416cb vmlinux.pre
8093205 5001600 790528 13885333 d3df95 vmlinux.post
Link: http://lkml.kernel.org/r/1486154544-4321-1-git-send-email-jbaron@akamai.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Joe Perches <joe@perches.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-02-03 23:42:24 +03:00
|
|
|
/*
|
2017-03-03 01:28:45 +03:00
|
|
|
* Note:
|
|
|
|
* To make anonymous unions work with old compilers, the static
|
|
|
|
* initialization of them requires brackets. This creates a dependency
|
|
|
|
* on the order of the struct with the initializers. If any fields
|
|
|
|
* are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
|
|
|
|
* to be modified.
|
|
|
|
*
|
jump_label: Reduce the size of struct static_key
The static_key->next field goes mostly unused. The field is used for
associating module uses with a static key. Most uses of struct static_key
define a static key in the core kernel and make use of it entirely within
the core kernel, or define the static key in a module and make use of it
only from within that module. In fact, of the ~3,000 static keys defined,
I found only about 5 or so that did not fit this pattern.
Thus, we can remove the static_key->next field entirely and overload
the static_key->entries field. That is, when all the static_key uses
are contained within the same module, static_key->entries continues
to point to those uses. However, if the static_key uses are not contained
within the module where the static_key is defined, then we allocate a
struct static_key_mod, store a pointer to the uses within that
struct static_key_mod, and have the static key point at the static_key_mod.
This does incur some extra memory usage when a static_key is used in a
module that does not define it, but since there are only a handful of such
cases there is a net savings.
In order to identify if the static_key->entries pointer contains a
struct static_key_mod or a struct jump_entry pointer, bit 1 of
static_key->entries is set to 1 if it points to a struct static_key_mod and
is 0 if it points to a struct jump_entry. We were already using bit 0 in a
similar way to store the initial value of the static_key. This does mean
that allocations of struct static_key_mod and that the struct jump_entry
tables need to be at least 4-byte aligned in memory. As far as I can tell
all arches meet this criteria.
For my .config, the patch increased the text by 778 bytes, but reduced
the data + bss size by 14912, for a net savings of 14,134 bytes.
text data bss dec hex filename
8092427 5016512 790528 13899467 d416cb vmlinux.pre
8093205 5001600 790528 13885333 d3df95 vmlinux.post
Link: http://lkml.kernel.org/r/1486154544-4321-1-git-send-email-jbaron@akamai.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Joe Perches <joe@perches.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-02-03 23:42:24 +03:00
|
|
|
* bit 0 => 1 if key is initially true
|
|
|
|
* 0 if initially false
|
|
|
|
* bit 1 => 1 if points to struct static_key_mod
|
|
|
|
* 0 if points to struct jump_entry
|
|
|
|
*/
|
|
|
|
union {
|
|
|
|
unsigned long type;
|
|
|
|
struct jump_entry *entries;
|
|
|
|
struct static_key_mod *next;
|
|
|
|
};
|
2011-03-17 00:29:47 +03:00
|
|
|
};
|
|
|
|
|
2014-06-05 03:10:07 +04:00
|
|
|
#else
|
|
|
|
struct static_key {
|
|
|
|
atomic_t enabled;
|
|
|
|
};
|
2015-04-09 06:51:31 +03:00
|
|
|
#endif /* HAVE_JUMP_LABEL */
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
|
|
#ifdef HAVE_JUMP_LABEL
|
|
|
|
#include <asm/jump_label.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
2010-09-17 19:09:00 +04:00
|
|
|
|
|
|
|
enum jump_label_type {
|
2015-07-24 15:45:44 +03:00
|
|
|
JUMP_LABEL_NOP = 0,
|
|
|
|
JUMP_LABEL_JMP,
|
2010-09-17 19:09:00 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
struct module;
|
|
|
|
|
2016-06-21 19:52:17 +03:00
|
|
|
#ifdef HAVE_JUMP_LABEL
|
|
|
|
|
jump_label: Reduce the size of struct static_key
The static_key->next field goes mostly unused. The field is used for
associating module uses with a static key. Most uses of struct static_key
define a static key in the core kernel and make use of it entirely within
the core kernel, or define the static key in a module and make use of it
only from within that module. In fact, of the ~3,000 static keys defined,
I found only about 5 or so that did not fit this pattern.
Thus, we can remove the static_key->next field entirely and overload
the static_key->entries field. That is, when all the static_key uses
are contained within the same module, static_key->entries continues
to point to those uses. However, if the static_key uses are not contained
within the module where the static_key is defined, then we allocate a
struct static_key_mod, store a pointer to the uses within that
struct static_key_mod, and have the static key point at the static_key_mod.
This does incur some extra memory usage when a static_key is used in a
module that does not define it, but since there are only a handful of such
cases there is a net savings.
In order to identify if the static_key->entries pointer contains a
struct static_key_mod or a struct jump_entry pointer, bit 1 of
static_key->entries is set to 1 if it points to a struct static_key_mod and
is 0 if it points to a struct jump_entry. We were already using bit 0 in a
similar way to store the initial value of the static_key. This does mean
that allocations of struct static_key_mod and that the struct jump_entry
tables need to be at least 4-byte aligned in memory. As far as I can tell
all arches meet this criteria.
For my .config, the patch increased the text by 778 bytes, but reduced
the data + bss size by 14912, for a net savings of 14,134 bytes.
text data bss dec hex filename
8092427 5016512 790528 13899467 d416cb vmlinux.pre
8093205 5001600 790528 13885333 d3df95 vmlinux.post
Link: http://lkml.kernel.org/r/1486154544-4321-1-git-send-email-jbaron@akamai.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Joe Perches <joe@perches.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
2017-02-03 23:42:24 +03:00
|
|
|
#define JUMP_TYPE_FALSE 0UL
|
|
|
|
#define JUMP_TYPE_TRUE 1UL
|
|
|
|
#define JUMP_TYPE_LINKED 2UL
|
|
|
|
#define JUMP_TYPE_MASK 3UL
|
2012-02-24 11:31:31 +04:00
|
|
|
|
|
|
|
static __always_inline bool static_key_false(struct static_key *key)
|
|
|
|
{
|
2015-07-24 16:09:55 +03:00
|
|
|
return arch_static_branch(key, false);
|
2012-02-24 11:31:31 +04:00
|
|
|
}
|
2011-03-17 00:29:47 +03:00
|
|
|
|
2012-02-24 11:31:31 +04:00
|
|
|
static __always_inline bool static_key_true(struct static_key *key)
|
|
|
|
{
|
2015-07-24 16:09:55 +03:00
|
|
|
return !arch_static_branch(key, true);
|
2012-02-24 11:31:31 +04:00
|
|
|
}
|
|
|
|
|
2010-09-17 19:09:00 +04:00
|
|
|
extern struct jump_entry __start___jump_table[];
|
|
|
|
extern struct jump_entry __stop___jump_table[];
|
|
|
|
|
2011-10-13 03:17:54 +04:00
|
|
|
extern void jump_label_init(void);
|
2010-10-02 01:23:48 +04:00
|
|
|
extern void jump_label_lock(void);
|
|
|
|
extern void jump_label_unlock(void);
|
2010-09-17 19:09:00 +04:00
|
|
|
extern void arch_jump_label_transform(struct jump_entry *entry,
|
2011-09-29 22:10:05 +04:00
|
|
|
enum jump_label_type type);
|
2011-10-03 22:01:46 +04:00
|
|
|
extern void arch_jump_label_transform_static(struct jump_entry *entry,
|
|
|
|
enum jump_label_type type);
|
2010-09-17 19:09:08 +04:00
|
|
|
extern int jump_label_text_reserved(void *start, void *end);
|
2012-02-24 11:31:31 +04:00
|
|
|
extern void static_key_slow_inc(struct static_key *key);
|
|
|
|
extern void static_key_slow_dec(struct static_key *key);
|
2011-03-17 00:29:47 +03:00
|
|
|
extern void jump_label_apply_nops(struct module *mod);
|
2016-08-03 23:46:36 +03:00
|
|
|
extern int static_key_count(struct static_key *key);
|
|
|
|
extern void static_key_enable(struct static_key *key);
|
|
|
|
extern void static_key_disable(struct static_key *key);
|
2017-08-01 11:02:56 +03:00
|
|
|
extern void static_key_enable_cpuslocked(struct static_key *key);
|
|
|
|
extern void static_key_disable_cpuslocked(struct static_key *key);
|
2012-02-24 11:31:31 +04:00
|
|
|
|
2016-08-03 23:46:36 +03:00
|
|
|
/*
|
|
|
|
* We should be using ATOMIC_INIT() for initializing .enabled, but
|
|
|
|
* the inclusion of atomic.h is problematic for inclusion of jump_label.h
|
|
|
|
* in 'low-level' headers. Thus, we are initializing .enabled with a
|
|
|
|
* raw value, but have added a BUILD_BUG_ON() to catch any issues in
|
|
|
|
* jump_label_init() see: kernel/jump_label.c.
|
|
|
|
*/
|
2015-07-24 16:09:55 +03:00
|
|
|
#define STATIC_KEY_INIT_TRUE \
|
2016-08-03 23:46:36 +03:00
|
|
|
{ .enabled = { 1 }, \
|
2017-02-28 19:32:22 +03:00
|
|
|
{ .entries = (void *)JUMP_TYPE_TRUE } }
|
2015-07-24 16:09:55 +03:00
|
|
|
#define STATIC_KEY_INIT_FALSE \
|
2016-08-03 23:46:36 +03:00
|
|
|
{ .enabled = { 0 }, \
|
2017-02-28 19:32:22 +03:00
|
|
|
{ .entries = (void *)JUMP_TYPE_FALSE } }
|
2010-09-17 19:09:00 +04:00
|
|
|
|
2011-10-13 03:17:54 +04:00
|
|
|
#else /* !HAVE_JUMP_LABEL */
|
2010-09-17 19:09:00 +04:00
|
|
|
|
2016-08-03 23:46:36 +03:00
|
|
|
#include <linux/atomic.h>
|
|
|
|
#include <linux/bug.h>
|
|
|
|
|
2016-06-21 19:52:17 +03:00
|
|
|
static inline int static_key_count(struct static_key *key)
|
|
|
|
{
|
|
|
|
return atomic_read(&key->enabled);
|
|
|
|
}
|
|
|
|
|
2011-10-13 03:17:54 +04:00
|
|
|
static __always_inline void jump_label_init(void)
|
|
|
|
{
|
2013-10-19 23:48:53 +04:00
|
|
|
static_key_initialized = true;
|
2011-10-13 03:17:54 +04:00
|
|
|
}
|
|
|
|
|
2012-02-24 11:31:31 +04:00
|
|
|
static __always_inline bool static_key_false(struct static_key *key)
|
|
|
|
{
|
2014-06-05 03:10:07 +04:00
|
|
|
if (unlikely(static_key_count(key) > 0))
|
2012-02-24 11:31:31 +04:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline bool static_key_true(struct static_key *key)
|
2011-03-17 00:29:47 +03:00
|
|
|
{
|
2014-06-05 03:10:07 +04:00
|
|
|
if (likely(static_key_count(key) > 0))
|
2011-03-17 00:29:47 +03:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
2010-09-17 19:09:00 +04:00
|
|
|
|
2012-02-24 11:31:31 +04:00
|
|
|
static inline void static_key_slow_inc(struct static_key *key)
|
2011-03-17 00:29:47 +03:00
|
|
|
{
|
2013-10-19 23:48:53 +04:00
|
|
|
STATIC_KEY_CHECK_USE();
|
2011-03-17 00:29:47 +03:00
|
|
|
atomic_inc(&key->enabled);
|
|
|
|
}
|
2010-09-17 19:09:00 +04:00
|
|
|
|
2012-02-24 11:31:31 +04:00
|
|
|
static inline void static_key_slow_dec(struct static_key *key)
|
2010-09-17 19:09:00 +04:00
|
|
|
{
|
2013-10-19 23:48:53 +04:00
|
|
|
STATIC_KEY_CHECK_USE();
|
2011-03-17 00:29:47 +03:00
|
|
|
atomic_dec(&key->enabled);
|
2010-09-17 19:09:00 +04:00
|
|
|
}
|
|
|
|
|
2010-09-17 19:09:08 +04:00
|
|
|
static inline int jump_label_text_reserved(void *start, void *end)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-10-02 01:23:48 +04:00
|
|
|
static inline void jump_label_lock(void) {}
|
|
|
|
static inline void jump_label_unlock(void) {}
|
|
|
|
|
2011-03-17 00:29:47 +03:00
|
|
|
static inline int jump_label_apply_nops(struct module *mod)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
2011-11-27 19:59:09 +04:00
|
|
|
|
2015-07-24 16:03:40 +03:00
|
|
|
static inline void static_key_enable(struct static_key *key)
|
|
|
|
{
|
2017-08-01 18:24:04 +03:00
|
|
|
STATIC_KEY_CHECK_USE();
|
2015-07-24 16:03:40 +03:00
|
|
|
|
2017-08-01 18:24:04 +03:00
|
|
|
if (atomic_read(&key->enabled) != 0) {
|
|
|
|
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
atomic_set(&key->enabled, 1);
|
2015-07-24 16:03:40 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void static_key_disable(struct static_key *key)
|
|
|
|
{
|
2017-08-01 18:24:04 +03:00
|
|
|
STATIC_KEY_CHECK_USE();
|
2015-07-24 16:03:40 +03:00
|
|
|
|
2017-08-01 18:24:04 +03:00
|
|
|
if (atomic_read(&key->enabled) != 1) {
|
|
|
|
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
atomic_set(&key->enabled, 0);
|
2015-07-24 16:03:40 +03:00
|
|
|
}
|
|
|
|
|
2017-08-01 11:02:56 +03:00
|
|
|
#define static_key_enable_cpuslocked(k) static_key_enable((k))
|
|
|
|
#define static_key_disable_cpuslocked(k) static_key_disable((k))
|
|
|
|
|
2016-08-03 23:46:36 +03:00
|
|
|
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
|
|
|
|
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
|
|
|
|
|
|
|
|
#endif /* HAVE_JUMP_LABEL */
|
|
|
|
|
|
|
|
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
|
|
|
|
#define jump_label_enabled static_key_enabled
|
|
|
|
|
2015-07-24 16:09:55 +03:00
|
|
|
/* -------------------------------------------------------------------------- */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Two type wrappers around static_key, such that we can use compile time
|
|
|
|
* type differentiation to emit the right code.
|
|
|
|
*
|
|
|
|
* All the below code is macros in order to play type games.
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct static_key_true {
|
|
|
|
struct static_key key;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct static_key_false {
|
|
|
|
struct static_key key;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
|
|
|
|
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
|
|
|
|
|
|
|
|
#define DEFINE_STATIC_KEY_TRUE(name) \
|
|
|
|
struct static_key_true name = STATIC_KEY_TRUE_INIT
|
|
|
|
|
2016-09-01 21:39:33 +03:00
|
|
|
#define DECLARE_STATIC_KEY_TRUE(name) \
|
|
|
|
extern struct static_key_true name
|
|
|
|
|
2015-07-24 16:09:55 +03:00
|
|
|
#define DEFINE_STATIC_KEY_FALSE(name) \
|
|
|
|
struct static_key_false name = STATIC_KEY_FALSE_INIT
|
|
|
|
|
2016-09-01 21:39:33 +03:00
|
|
|
#define DECLARE_STATIC_KEY_FALSE(name) \
|
|
|
|
extern struct static_key_false name
|
|
|
|
|
2016-09-05 20:25:47 +03:00
|
|
|
#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \
|
|
|
|
struct static_key_true name[count] = { \
|
|
|
|
[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \
|
|
|
|
struct static_key_false name[count] = { \
|
|
|
|
[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
|
|
|
|
}
|
|
|
|
|
2015-09-18 18:56:28 +03:00
|
|
|
extern bool ____wrong_branch_error(void);
|
|
|
|
|
|
|
|
#define static_key_enabled(x) \
|
|
|
|
({ \
|
|
|
|
if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \
|
|
|
|
!__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
|
|
|
|
!__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
|
|
|
|
____wrong_branch_error(); \
|
|
|
|
static_key_count((struct static_key *)x) > 0; \
|
|
|
|
})
|
|
|
|
|
2015-07-24 16:09:55 +03:00
|
|
|
#ifdef HAVE_JUMP_LABEL
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Combine the right initial value (type) with the right branch order
|
|
|
|
* to generate the desired result.
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* type\branch| likely (1) | unlikely (0)
|
|
|
|
* -----------+-----------------------+------------------
|
|
|
|
* | |
|
|
|
|
* true (1) | ... | ...
|
|
|
|
* | NOP | JMP L
|
|
|
|
* | <br-stmts> | 1: ...
|
|
|
|
* | L: ... |
|
|
|
|
* | |
|
|
|
|
* | | L: <br-stmts>
|
|
|
|
* | | jmp 1b
|
|
|
|
* | |
|
|
|
|
* -----------+-----------------------+------------------
|
|
|
|
* | |
|
|
|
|
* false (0) | ... | ...
|
|
|
|
* | JMP L | NOP
|
|
|
|
* | <br-stmts> | 1: ...
|
|
|
|
* | L: ... |
|
|
|
|
* | |
|
|
|
|
* | | L: <br-stmts>
|
|
|
|
* | | jmp 1b
|
|
|
|
* | |
|
|
|
|
* -----------+-----------------------+------------------
|
|
|
|
*
|
|
|
|
* The initial value is encoded in the LSB of static_key::entries,
|
|
|
|
* type: 0 = false, 1 = true.
|
|
|
|
*
|
|
|
|
* The branch type is encoded in the LSB of jump_entry::key,
|
|
|
|
* branch: 0 = unlikely, 1 = likely.
|
|
|
|
*
|
|
|
|
* This gives the following logic table:
|
|
|
|
*
|
|
|
|
 * enabled	type	branch	  instruction
|
|
|
|
* -----------------------------+-----------
|
|
|
|
* 0 0 0 | NOP
|
|
|
|
* 0 0 1 | JMP
|
|
|
|
* 0 1 0 | NOP
|
|
|
|
* 0 1 1 | JMP
|
|
|
|
*
|
|
|
|
* 1 0 0 | JMP
|
|
|
|
* 1 0 1 | NOP
|
|
|
|
* 1 1 0 | JMP
|
|
|
|
* 1 1 1 | NOP
|
|
|
|
*
|
|
|
|
* Which gives the following functions:
|
|
|
|
*
|
|
|
|
* dynamic: instruction = enabled ^ branch
|
|
|
|
* static: instruction = type ^ branch
|
|
|
|
*
|
|
|
|
* See jump_label_type() / jump_label_init_type().
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define static_branch_likely(x) \
|
|
|
|
({ \
|
|
|
|
bool branch; \
|
|
|
|
if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
|
|
|
|
branch = !arch_static_branch(&(x)->key, true); \
|
|
|
|
else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
|
|
|
|
branch = !arch_static_branch_jump(&(x)->key, true); \
|
|
|
|
else \
|
|
|
|
branch = ____wrong_branch_error(); \
|
|
|
|
branch; \
|
|
|
|
})
|
|
|
|
|
|
|
|
#define static_branch_unlikely(x) \
|
|
|
|
({ \
|
|
|
|
bool branch; \
|
|
|
|
if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
|
|
|
|
branch = arch_static_branch_jump(&(x)->key, false); \
|
|
|
|
else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
|
|
|
|
branch = arch_static_branch(&(x)->key, false); \
|
|
|
|
else \
|
|
|
|
branch = ____wrong_branch_error(); \
|
|
|
|
branch; \
|
|
|
|
})
|
|
|
|
|
|
|
|
#else /* !HAVE_JUMP_LABEL */
|
|
|
|
|
|
|
|
#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
|
|
|
|
#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
|
|
|
|
|
|
|
|
#endif /* HAVE_JUMP_LABEL */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Advanced usage; refcount, branch is enabled when: count != 0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
|
|
|
|
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Normal usage; boolean enable/disable.
|
|
|
|
*/
|
|
|
|
|
2017-08-01 11:02:56 +03:00
|
|
|
#define static_branch_enable(x) static_key_enable(&(x)->key)
|
|
|
|
#define static_branch_disable(x) static_key_disable(&(x)->key)
|
|
|
|
#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
|
|
|
|
#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
|
2015-07-24 16:09:55 +03:00
|
|
|
|
2015-04-09 06:51:31 +03:00
|
|
|
#endif /* __ASSEMBLY__ */
|
2017-01-18 20:38:04 +03:00
|
|
|
|
|
|
|
#endif /* _LINUX_JUMP_LABEL_H */
|