static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot tested patch on top of Jason's series that does all the cleanups I talked about and turns jump labels into a more intuitive-to-use facility. It should also address the various misconceptions and confusions that surround jump labels. Typical usage scenarios: #include <linux/static_key.h> struct static_key key = STATIC_KEY_INIT_TRUE; if (static_key_false(&key)) do unlikely code else do likely code Or: if (static_key_true(&key)) do likely code else do unlikely code The static key is modified via: static_key_slow_inc(&key); ... static_key_slow_dec(&key); The 'slow' prefix makes it abundantly clear that this is an expensive operation. I've updated all in-kernel code to use this everywhere. Note that I (intentionally) have not blindly pushed the rename through to the lowest levels: the actual jump-label patching arch facility should be named like that, so we want to decouple jump labels from the static-key facility a bit. On non-jump-label enabled architectures static keys default to likely()/unlikely() branches. Signed-off-by: Ingo Molnar <mingo@elte.hu> Acked-by: Jason Baron <jbaron@redhat.com> Acked-by: Steven Rostedt <rostedt@goodmis.org> Cc: a.p.zijlstra@chello.nl Cc: mathieu.desnoyers@efficios.com Cc: davem@davemloft.net Cc: ddaney.cavm@gmail.com Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
@@ -128,7 +128,7 @@ enum event_type_t {
|
||||
* perf_sched_events : >0 events exist
|
||||
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
|
||||
*/
|
||||
struct jump_label_key_deferred perf_sched_events __read_mostly;
|
||||
struct static_key_deferred perf_sched_events __read_mostly;
|
||||
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
|
||||
|
||||
static atomic_t nr_mmap_events __read_mostly;
|
||||
@@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event)
|
||||
|
||||
if (!event->parent) {
|
||||
if (event->attach_state & PERF_ATTACH_TASK)
|
||||
jump_label_dec_deferred(&perf_sched_events);
|
||||
static_key_slow_dec_deferred(&perf_sched_events);
|
||||
if (event->attr.mmap || event->attr.mmap_data)
|
||||
atomic_dec(&nr_mmap_events);
|
||||
if (event->attr.comm)
|
||||
@@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event)
|
||||
put_callchain_buffers();
|
||||
if (is_cgroup_event(event)) {
|
||||
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
|
||||
jump_label_dec_deferred(&perf_sched_events);
|
||||
static_key_slow_dec_deferred(&perf_sched_events);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4982,7 +4982,7 @@ fail:
|
||||
return err;
|
||||
}
|
||||
|
||||
struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
|
||||
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
|
||||
|
||||
static void sw_perf_event_destroy(struct perf_event *event)
|
||||
{
|
||||
@@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
|
||||
|
||||
WARN_ON(event->parent);
|
||||
|
||||
jump_label_dec(&perf_swevent_enabled[event_id]);
|
||||
static_key_slow_dec(&perf_swevent_enabled[event_id]);
|
||||
swevent_hlist_put(event);
|
||||
}
|
||||
|
||||
@@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
jump_label_inc(&perf_swevent_enabled[event_id]);
|
||||
static_key_slow_inc(&perf_swevent_enabled[event_id]);
|
||||
event->destroy = sw_perf_event_destroy;
|
||||
}
|
||||
|
||||
@@ -5843,7 +5843,7 @@ done:
|
||||
|
||||
if (!event->parent) {
|
||||
if (event->attach_state & PERF_ATTACH_TASK)
|
||||
jump_label_inc(&perf_sched_events.key);
|
||||
static_key_slow_inc(&perf_sched_events.key);
|
||||
if (event->attr.mmap || event->attr.mmap_data)
|
||||
atomic_inc(&nr_mmap_events);
|
||||
if (event->attr.comm)
|
||||
@@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open,
|
||||
* - that may need work on context switch
|
||||
*/
|
||||
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
|
||||
jump_label_inc(&perf_sched_events.key);
|
||||
static_key_slow_inc(&perf_sched_events.key);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -12,7 +12,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/static_key.h>
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
@@ -29,10 +29,11 @@ void jump_label_unlock(void)
|
||||
mutex_unlock(&jump_label_mutex);
|
||||
}
|
||||
|
||||
bool jump_label_enabled(struct jump_label_key *key)
|
||||
bool static_key_enabled(struct static_key *key)
|
||||
{
|
||||
return !!atomic_read(&key->enabled);
|
||||
return (atomic_read(&key->enabled) > 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(static_key_enabled);
|
||||
|
||||
static int jump_label_cmp(const void *a, const void *b)
|
||||
{
|
||||
@@ -58,22 +59,26 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
|
||||
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
|
||||
}
|
||||
|
||||
static void jump_label_update(struct jump_label_key *key, int enable);
|
||||
static void jump_label_update(struct static_key *key, int enable);
|
||||
|
||||
void jump_label_inc(struct jump_label_key *key)
|
||||
void static_key_slow_inc(struct static_key *key)
|
||||
{
|
||||
if (atomic_inc_not_zero(&key->enabled))
|
||||
return;
|
||||
|
||||
jump_label_lock();
|
||||
if (atomic_read(&key->enabled) == 0)
|
||||
jump_label_update(key, JUMP_LABEL_ENABLE);
|
||||
if (atomic_read(&key->enabled) == 0) {
|
||||
if (!jump_label_get_branch_default(key))
|
||||
jump_label_update(key, JUMP_LABEL_ENABLE);
|
||||
else
|
||||
jump_label_update(key, JUMP_LABEL_DISABLE);
|
||||
}
|
||||
atomic_inc(&key->enabled);
|
||||
jump_label_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(jump_label_inc);
|
||||
EXPORT_SYMBOL_GPL(static_key_slow_inc);
|
||||
|
||||
static void __jump_label_dec(struct jump_label_key *key,
|
||||
static void __static_key_slow_dec(struct static_key *key,
|
||||
unsigned long rate_limit, struct delayed_work *work)
|
||||
{
|
||||
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
|
||||
@@ -85,32 +90,35 @@ static void __jump_label_dec(struct jump_label_key *key,
|
||||
if (rate_limit) {
|
||||
atomic_inc(&key->enabled);
|
||||
schedule_delayed_work(work, rate_limit);
|
||||
} else
|
||||
jump_label_update(key, JUMP_LABEL_DISABLE);
|
||||
|
||||
} else {
|
||||
if (!jump_label_get_branch_default(key))
|
||||
jump_label_update(key, JUMP_LABEL_DISABLE);
|
||||
else
|
||||
jump_label_update(key, JUMP_LABEL_ENABLE);
|
||||
}
|
||||
jump_label_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(jump_label_dec);
|
||||
|
||||
static void jump_label_update_timeout(struct work_struct *work)
|
||||
{
|
||||
struct jump_label_key_deferred *key =
|
||||
container_of(work, struct jump_label_key_deferred, work.work);
|
||||
__jump_label_dec(&key->key, 0, NULL);
|
||||
struct static_key_deferred *key =
|
||||
container_of(work, struct static_key_deferred, work.work);
|
||||
__static_key_slow_dec(&key->key, 0, NULL);
|
||||
}
|
||||
|
||||
void jump_label_dec(struct jump_label_key *key)
|
||||
void static_key_slow_dec(struct static_key *key)
|
||||
{
|
||||
__jump_label_dec(key, 0, NULL);
|
||||
__static_key_slow_dec(key, 0, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(static_key_slow_dec);
|
||||
|
||||
void jump_label_dec_deferred(struct jump_label_key_deferred *key)
|
||||
void static_key_slow_dec_deferred(struct static_key_deferred *key)
|
||||
{
|
||||
__jump_label_dec(&key->key, key->timeout, &key->work);
|
||||
__static_key_slow_dec(&key->key, key->timeout, &key->work);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
|
||||
|
||||
|
||||
void jump_label_rate_limit(struct jump_label_key_deferred *key,
|
||||
void jump_label_rate_limit(struct static_key_deferred *key,
|
||||
unsigned long rl)
|
||||
{
|
||||
key->timeout = rl;
|
||||
@@ -153,7 +161,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
|
||||
arch_jump_label_transform(entry, type);
|
||||
}
|
||||
|
||||
static void __jump_label_update(struct jump_label_key *key,
|
||||
static void __jump_label_update(struct static_key *key,
|
||||
struct jump_entry *entry,
|
||||
struct jump_entry *stop, int enable)
|
||||
{
|
||||
@@ -170,27 +178,40 @@ static void __jump_label_update(struct jump_label_key *key,
|
||||
}
|
||||
}
|
||||
|
||||
static enum jump_label_type jump_label_type(struct static_key *key)
|
||||
{
|
||||
bool true_branch = jump_label_get_branch_default(key);
|
||||
bool state = static_key_enabled(key);
|
||||
|
||||
if ((!true_branch && state) || (true_branch && !state))
|
||||
return JUMP_LABEL_ENABLE;
|
||||
|
||||
return JUMP_LABEL_DISABLE;
|
||||
}
|
||||
|
||||
void __init jump_label_init(void)
|
||||
{
|
||||
struct jump_entry *iter_start = __start___jump_table;
|
||||
struct jump_entry *iter_stop = __stop___jump_table;
|
||||
struct jump_label_key *key = NULL;
|
||||
struct static_key *key = NULL;
|
||||
struct jump_entry *iter;
|
||||
|
||||
jump_label_lock();
|
||||
jump_label_sort_entries(iter_start, iter_stop);
|
||||
|
||||
for (iter = iter_start; iter < iter_stop; iter++) {
|
||||
struct jump_label_key *iterk;
|
||||
struct static_key *iterk;
|
||||
|
||||
iterk = (struct jump_label_key *)(unsigned long)iter->key;
|
||||
arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
|
||||
JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
|
||||
iterk = (struct static_key *)(unsigned long)iter->key;
|
||||
arch_jump_label_transform_static(iter, jump_label_type(iterk));
|
||||
if (iterk == key)
|
||||
continue;
|
||||
|
||||
key = iterk;
|
||||
key->entries = iter;
|
||||
/*
|
||||
* Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
|
||||
*/
|
||||
*((unsigned long *)&key->entries) += (unsigned long)iter;
|
||||
#ifdef CONFIG_MODULES
|
||||
key->next = NULL;
|
||||
#endif
|
||||
@@ -200,8 +221,8 @@ void __init jump_label_init(void)
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
|
||||
struct jump_label_mod {
|
||||
struct jump_label_mod *next;
|
||||
struct static_key_mod {
|
||||
struct static_key_mod *next;
|
||||
struct jump_entry *entries;
|
||||
struct module *mod;
|
||||
};
|
||||
@@ -221,9 +242,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
|
||||
start, end);
|
||||
}
|
||||
|
||||
static void __jump_label_mod_update(struct jump_label_key *key, int enable)
|
||||
static void __jump_label_mod_update(struct static_key *key, int enable)
|
||||
{
|
||||
struct jump_label_mod *mod = key->next;
|
||||
struct static_key_mod *mod = key->next;
|
||||
|
||||
while (mod) {
|
||||
struct module *m = mod->mod;
|
||||
@@ -254,11 +275,7 @@ void jump_label_apply_nops(struct module *mod)
|
||||
return;
|
||||
|
||||
for (iter = iter_start; iter < iter_stop; iter++) {
|
||||
struct jump_label_key *iterk;
|
||||
|
||||
iterk = (struct jump_label_key *)(unsigned long)iter->key;
|
||||
arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
|
||||
JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
|
||||
arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -267,8 +284,8 @@ static int jump_label_add_module(struct module *mod)
|
||||
struct jump_entry *iter_start = mod->jump_entries;
|
||||
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
|
||||
struct jump_entry *iter;
|
||||
struct jump_label_key *key = NULL;
|
||||
struct jump_label_mod *jlm;
|
||||
struct static_key *key = NULL;
|
||||
struct static_key_mod *jlm;
|
||||
|
||||
/* if the module doesn't have jump label entries, just return */
|
||||
if (iter_start == iter_stop)
|
||||
@@ -277,28 +294,30 @@ static int jump_label_add_module(struct module *mod)
|
||||
jump_label_sort_entries(iter_start, iter_stop);
|
||||
|
||||
for (iter = iter_start; iter < iter_stop; iter++) {
|
||||
if (iter->key == (jump_label_t)(unsigned long)key)
|
||||
struct static_key *iterk;
|
||||
|
||||
iterk = (struct static_key *)(unsigned long)iter->key;
|
||||
if (iterk == key)
|
||||
continue;
|
||||
|
||||
key = (struct jump_label_key *)(unsigned long)iter->key;
|
||||
|
||||
key = iterk;
|
||||
if (__module_address(iter->key) == mod) {
|
||||
atomic_set(&key->enabled, 0);
|
||||
key->entries = iter;
|
||||
/*
|
||||
* Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
|
||||
*/
|
||||
*((unsigned long *)&key->entries) += (unsigned long)iter;
|
||||
key->next = NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
|
||||
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
|
||||
if (!jlm)
|
||||
return -ENOMEM;
|
||||
|
||||
jlm->mod = mod;
|
||||
jlm->entries = iter;
|
||||
jlm->next = key->next;
|
||||
key->next = jlm;
|
||||
|
||||
if (jump_label_enabled(key))
|
||||
if (jump_label_type(key) == JUMP_LABEL_ENABLE)
|
||||
__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
|
||||
}
|
||||
|
||||
@@ -310,14 +329,14 @@ static void jump_label_del_module(struct module *mod)
|
||||
struct jump_entry *iter_start = mod->jump_entries;
|
||||
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
|
||||
struct jump_entry *iter;
|
||||
struct jump_label_key *key = NULL;
|
||||
struct jump_label_mod *jlm, **prev;
|
||||
struct static_key *key = NULL;
|
||||
struct static_key_mod *jlm, **prev;
|
||||
|
||||
for (iter = iter_start; iter < iter_stop; iter++) {
|
||||
if (iter->key == (jump_label_t)(unsigned long)key)
|
||||
continue;
|
||||
|
||||
key = (struct jump_label_key *)(unsigned long)iter->key;
|
||||
key = (struct static_key *)(unsigned long)iter->key;
|
||||
|
||||
if (__module_address(iter->key) == mod)
|
||||
continue;
|
||||
@@ -419,9 +438,10 @@ int jump_label_text_reserved(void *start, void *end)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void jump_label_update(struct jump_label_key *key, int enable)
|
||||
static void jump_label_update(struct static_key *key, int enable)
|
||||
{
|
||||
struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
|
||||
struct jump_entry *stop = __stop___jump_table;
|
||||
struct jump_entry *entry = jump_label_get_entries(key);
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
struct module *mod = __module_address((unsigned long)key);
|
||||
|
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
#define jump_label_key__true jump_label_key_enabled
|
||||
#define jump_label_key__false jump_label_key_disabled
|
||||
#define jump_label_key__true STATIC_KEY_INIT_TRUE
|
||||
#define jump_label_key__false STATIC_KEY_INIT_FALSE
|
||||
|
||||
#define SCHED_FEAT(name, enabled) \
|
||||
jump_label_key__##enabled ,
|
||||
|
||||
struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
|
||||
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
|
||||
#include "features.h"
|
||||
};
|
||||
|
||||
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
|
||||
|
||||
static void sched_feat_disable(int i)
|
||||
{
|
||||
if (jump_label_enabled(&sched_feat_keys[i]))
|
||||
jump_label_dec(&sched_feat_keys[i]);
|
||||
if (static_key_enabled(&sched_feat_keys[i]))
|
||||
static_key_slow_dec(&sched_feat_keys[i]);
|
||||
}
|
||||
|
||||
static void sched_feat_enable(int i)
|
||||
{
|
||||
if (!jump_label_enabled(&sched_feat_keys[i]))
|
||||
jump_label_inc(&sched_feat_keys[i]);
|
||||
if (!static_key_enabled(&sched_feat_keys[i]))
|
||||
static_key_slow_inc(&sched_feat_keys[i]);
|
||||
}
|
||||
#else
|
||||
static void sched_feat_disable(int i) { };
|
||||
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
|
||||
delta -= irq_delta;
|
||||
#endif
|
||||
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
|
||||
if (static_branch((¶virt_steal_rq_enabled))) {
|
||||
if (static_key_false((¶virt_steal_rq_enabled))) {
|
||||
u64 st;
|
||||
|
||||
steal = paravirt_steal_clock(cpu_of(rq));
|
||||
@@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime)
|
||||
static __always_inline bool steal_account_process_tick(void)
|
||||
{
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
if (static_branch(¶virt_steal_enabled)) {
|
||||
if (static_key_false(¶virt_steal_enabled)) {
|
||||
u64 steal, st = 0;
|
||||
|
||||
steal = paravirt_steal_clock(smp_processor_id());
|
||||
|
@@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
||||
#ifdef CONFIG_CFS_BANDWIDTH
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
static struct jump_label_key __cfs_bandwidth_used;
|
||||
static struct static_key __cfs_bandwidth_used;
|
||||
|
||||
static inline bool cfs_bandwidth_used(void)
|
||||
{
|
||||
return static_branch(&__cfs_bandwidth_used);
|
||||
return static_key_false(&__cfs_bandwidth_used);
|
||||
}
|
||||
|
||||
void account_cfs_bandwidth_used(int enabled, int was_enabled)
|
||||
{
|
||||
/* only need to count groups transitioning between enabled/!enabled */
|
||||
if (enabled && !was_enabled)
|
||||
jump_label_inc(&__cfs_bandwidth_used);
|
||||
static_key_slow_inc(&__cfs_bandwidth_used);
|
||||
else if (!enabled && was_enabled)
|
||||
jump_label_dec(&__cfs_bandwidth_used);
|
||||
static_key_slow_dec(&__cfs_bandwidth_used);
|
||||
}
|
||||
#else /* HAVE_JUMP_LABEL */
|
||||
static bool cfs_bandwidth_used(void)
|
||||
|
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
|
||||
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
|
||||
*/
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
# include <linux/jump_label.h>
|
||||
# include <linux/static_key.h>
|
||||
# define const_debug __read_mostly
|
||||
#else
|
||||
# define const_debug const
|
||||
@@ -630,18 +630,18 @@ enum {
|
||||
#undef SCHED_FEAT
|
||||
|
||||
#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
|
||||
static __always_inline bool static_branch__true(struct jump_label_key *key)
|
||||
static __always_inline bool static_branch__true(struct static_key *key)
|
||||
{
|
||||
return likely(static_branch(key)); /* Not out of line branch. */
|
||||
return static_key_true(key); /* Not out of line branch. */
|
||||
}
|
||||
|
||||
static __always_inline bool static_branch__false(struct jump_label_key *key)
|
||||
static __always_inline bool static_branch__false(struct static_key *key)
|
||||
{
|
||||
return unlikely(static_branch(key)); /* Out of line branch. */
|
||||
return static_key_false(key); /* Out of line branch. */
|
||||
}
|
||||
|
||||
#define SCHED_FEAT(name, enabled) \
|
||||
static __always_inline bool static_branch_##name(struct jump_label_key *key) \
|
||||
static __always_inline bool static_branch_##name(struct static_key *key) \
|
||||
{ \
|
||||
return static_branch__##enabled(key); \
|
||||
}
|
||||
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
|
||||
|
||||
#undef SCHED_FEAT
|
||||
|
||||
extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
|
||||
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
|
||||
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
|
||||
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
|
||||
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
|
||||
|
@@ -25,7 +25,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/static_key.h>
|
||||
|
||||
extern struct tracepoint * const __start___tracepoints_ptrs[];
|
||||
extern struct tracepoint * const __stop___tracepoints_ptrs[];
|
||||
@@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
|
||||
{
|
||||
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
|
||||
|
||||
if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
|
||||
if (elem->regfunc && !static_key_enabled(&elem->key) && active)
|
||||
elem->regfunc();
|
||||
else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
|
||||
else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
|
||||
elem->unregfunc();
|
||||
|
||||
/*
|
||||
@@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
|
||||
* is used.
|
||||
*/
|
||||
rcu_assign_pointer(elem->funcs, (*entry)->funcs);
|
||||
if (active && !jump_label_enabled(&elem->key))
|
||||
jump_label_inc(&elem->key);
|
||||
else if (!active && jump_label_enabled(&elem->key))
|
||||
jump_label_dec(&elem->key);
|
||||
if (active && !static_key_enabled(&elem->key))
|
||||
static_key_slow_inc(&elem->key);
|
||||
else if (!active && static_key_enabled(&elem->key))
|
||||
static_key_slow_dec(&elem->key);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
|
||||
*/
|
||||
static void disable_tracepoint(struct tracepoint *elem)
|
||||
{
|
||||
if (elem->unregfunc && jump_label_enabled(&elem->key))
|
||||
if (elem->unregfunc && static_key_enabled(&elem->key))
|
||||
elem->unregfunc();
|
||||
|
||||
if (jump_label_enabled(&elem->key))
|
||||
jump_label_dec(&elem->key);
|
||||
if (static_key_enabled(&elem->key))
|
||||
static_key_slow_dec(&elem->key);
|
||||
rcu_assign_pointer(elem->funcs, NULL);
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user