Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now-duplicate two sets of accessors remained, with the pointer-based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations.

 This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr(). Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr().

 This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
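As a concrete illustration of the conversion pattern described above, here is a minimal sketch (not part of the series; the per-CPU variables demo_counter and demo_mask and the function demo_accessors() are hypothetical) showing how the old lvalue-style accessors map onto the pointer/read/write operations:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_counter);
static DEFINE_PER_CPU(cpumask_var_t, demo_mask);	/* assumed allocated at init time */

static void demo_accessors(void)
{
	int *p, *q;
	struct cpumask *m;

	preempt_disable();

	/* was: __get_cpu_var(demo_counter) = 0;  (lvalue of this CPU's copy) */
	__this_cpu_write(demo_counter, 0);

	/* was: p = &__get_cpu_var(demo_counter); */
	p = this_cpu_ptr(&demo_counter);

	/* was: q = __this_cpu_ptr(&demo_counter);  (no preemption checks) */
	q = raw_cpu_ptr(&demo_counter);

	/*
	 * was: m = __get_cpu_var(demo_mask);
	 * cpumask_var_t is either an embedded cpumask or a pointer depending
	 * on CONFIG_CPUMASK_OFFSTACK, hence the dedicated helper.
	 */
	m = this_cpu_cpumask_var_ptr(demo_mask);

	cpumask_clear(m);
	*p += *q;

	preempt_enable();
}

The diff below applies this kind of substitution throughout kernel/.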
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 int cpu;
 struct callchain_cpus_entries *entries;

-*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 if (*rctx == -1)
 return NULL;

@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 static void
 put_callchain_entry(int rctx)
 {
-put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }

 struct perf_callchain_entry *
@@ -249,7 +249,7 @@ static void perf_duration_warn(struct irq_work *w)
 u64 avg_local_sample_len;
 u64 local_samples_len;

-local_samples_len = __get_cpu_var(running_sample_length);
+local_samples_len = __this_cpu_read(running_sample_length);
 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

 printk_ratelimited(KERN_WARNING
@@ -271,10 +271,10 @@ void perf_sample_event_took(u64 sample_len_ns)
 return;

 /* decay the counter by 1 average sample */
-local_samples_len = __get_cpu_var(running_sample_length);
+local_samples_len = __this_cpu_read(running_sample_length);
 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
 local_samples_len += sample_len_ns;
-__get_cpu_var(running_sample_length) = local_samples_len;
+__this_cpu_write(running_sample_length, local_samples_len);

 /*
 * note: this will be biased artifically low until we have
@@ -882,7 +882,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-struct list_head *head = &__get_cpu_var(rotation_list);
+struct list_head *head = this_cpu_ptr(&rotation_list);

 WARN_ON(!irqs_disabled());

@@ -2462,7 +2462,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 * to check if we have to switch out PMU state.
 * cgroup event are system-wide mode only
 */
-if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 perf_cgroup_sched_out(task, next);
 }

@@ -2705,11 +2705,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 * to check if we have to switch in PMU state.
 * cgroup event are system-wide mode only
 */
-if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 perf_cgroup_sched_in(prev, task);

 /* check for system-wide branch_stack events */
-if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
 perf_branch_stack_sched_in(prev, task);
 }

@@ -2964,7 +2964,7 @@ bool perf_event_can_stop_tick(void)

 void perf_event_task_tick(void)
 {
-struct list_head *head = &__get_cpu_var(rotation_list);
+struct list_head *head = this_cpu_ptr(&rotation_list);
 struct perf_cpu_context *cpuctx, *tmp;
 struct perf_event_context *ctx;
 int throttled;
@@ -5833,7 +5833,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 struct perf_sample_data *data,
 struct pt_regs *regs)
 {
-struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 struct perf_event *event;
 struct hlist_head *head;

@@ -5852,7 +5852,7 @@ end:

 int perf_swevent_get_recursion_context(void)
 {
-struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

 return get_recursion_context(swhash->recursion);
 }
@@ -5860,7 +5860,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

 inline void perf_swevent_put_recursion_context(int rctx)
 {
-struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

 put_recursion_context(swhash->recursion, rctx);
 }
@@ -5889,7 +5889,7 @@ static void perf_swevent_read(struct perf_event *event)

 static int perf_swevent_add(struct perf_event *event, int flags)
 {
-struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 struct hw_perf_event *hwc = &event->hw;
 struct hlist_head *head;

@@ -699,7 +699,7 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
 {
 struct irq_chip *chip = irq_desc_get_chip(desc);
 struct irqaction *action = desc->action;
-void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
 irqreturn_t res;

 kstat_incr_irqs_this_cpu(irq, desc);
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work)

 /* If the work is "lazy", handle it from next tick if any */
 if (work->flags & IRQ_WORK_LAZY) {
-if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
 tick_nohz_tick_stopped())
 arch_irq_work_raise();
 } else {
-if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
+if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
 arch_irq_work_raise();
 }

@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void)
 {
 struct llist_head *raised, *lazy;

-raised = &__get_cpu_var(raised_list);
-lazy = &__get_cpu_var(lazy_list);
+raised = this_cpu_ptr(&raised_list);
+lazy = this_cpu_ptr(&lazy_list);

 if (llist_empty(raised) || arch_irq_work_has_interrupt())
 if (llist_empty(lazy))
@@ -168,8 +168,8 @@ static void irq_work_run_list(struct llist_head *list)
 */
 void irq_work_run(void)
 {
-irq_work_run_list(&__get_cpu_var(raised_list));
-irq_work_run_list(&__get_cpu_var(lazy_list));
+irq_work_run_list(this_cpu_ptr(&raised_list));
+irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);

@@ -2622,7 +2622,7 @@ void wake_up_klogd(void)
 preempt_disable();
 if (waitqueue_active(&log_wait)) {
 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 }
 preempt_enable();
 }
@@ -2638,7 +2638,7 @@ int printk_deferred(const char *fmt, ...)
 va_end(args);

 __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 preempt_enable();

 return r;
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

 static inline struct sched_clock_data *this_scd(void)
 {
-return &__get_cpu_var(sched_clock_data);
+return this_cpu_ptr(&sched_clock_data);
 }

 static inline struct sched_clock_data *cpu_sdc(int cpu)
@@ -1153,7 +1153,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 static int find_later_rq(struct task_struct *task)
 {
 struct sched_domain *sd;
-struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
+struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 int this_cpu = smp_processor_id();
 int best_cpu, cpu = task_cpu(task);

@@ -6615,7 +6615,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 struct sched_group *group;
 struct rq *busiest;
 unsigned long flags;
-struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);

 struct lb_env env = {
 .sd = sd,
@@ -1525,7 +1525,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 static int find_lowest_rq(struct task_struct *task)
 {
 struct sched_domain *sd;
-struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 int this_cpu = smp_processor_id();
 int cpu = task_cpu(task);

@@ -663,10 +663,10 @@ static inline int cpu_of(struct rq *rq)
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() (&__get_cpu_var(runqueues))
+#define this_rq() this_cpu_ptr(&runqueues)
 #define task_rq(p) cpu_rq(task_cpu(p))
 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-#define raw_rq() (&__raw_get_cpu_var(runqueues))
+#define raw_rq() raw_cpu_ptr(&runqueues)

 static inline u64 rq_clock(struct rq *rq)
 {
@@ -165,7 +165,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 if (!csd) {
 csd = &csd_stack;
 if (!wait)
-csd = &__get_cpu_var(csd_data);
+csd = this_cpu_ptr(&csd_data);
 }

 csd_lock(csd);
@@ -230,7 +230,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)

 WARN_ON(!irqs_disabled());

-head = &__get_cpu_var(call_single_queue);
+head = this_cpu_ptr(&call_single_queue);
 entry = llist_del_all(head);
 entry = llist_reverse_order(entry);

@@ -420,7 +420,7 @@ void smp_call_function_many(const struct cpumask *mask,
 return;
 }

-cfd = &__get_cpu_var(cfd_data);
+cfd = this_cpu_ptr(&cfd_data);

 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 cpumask_clear_cpu(this_cpu, cfd->cpumask);
@@ -485,7 +485,7 @@ static void tasklet_action(struct softirq_action *a)
 local_irq_disable();
 list = __this_cpu_read(tasklet_vec.head);
 __this_cpu_write(tasklet_vec.head, NULL);
-__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
 local_irq_enable();

 while (list) {
@@ -521,7 +521,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 local_irq_disable();
 list = __this_cpu_read(tasklet_hi_vec.head);
 __this_cpu_write(tasklet_hi_vec.head, NULL);
-__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
 local_irq_enable();

 while (list) {
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 fill_tgid_exit(tsk);
 }

-listeners = __this_cpu_ptr(&listener_array);
+listeners = raw_cpu_ptr(&listener_array);
 if (list_empty(&listeners->list))
 return;

@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
 struct hrtimer_clock_base *base)
 {
-struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 int res;

@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 */
 static void retrigger_next_event(void *arg)
 {
-struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

 if (!hrtimer_hres_active())
 return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 */
 debug_deactivate(timer);
 timer_stats_hrtimer_clear_start_info(timer);
-reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 /*
 * We must preserve the CALLBACK state flag here,
 * otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 * on dynticks target.
 */
 wake_up_nohz_cpu(new_base->cpu_base->cpu);
-} else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+} else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
 hrtimer_reprogram(timer, new_base)) {
 /*
 * Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 */
 ktime_t hrtimer_get_next_event(void)
 {
-struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 struct hrtimer_clock_base *base = cpu_base->clock_base;
 ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
 unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,

 memset(timer, 0, sizeof(struct hrtimer));

-cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+cpu_base = raw_cpu_ptr(&hrtimer_bases);

 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 struct hrtimer_cpu_base *cpu_base;
 int base = hrtimer_clockid_to_base(which_clock);

-cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+cpu_base = raw_cpu_ptr(&hrtimer_bases);
 *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

 return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 */
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
-struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 ktime_t expires_next, now, entry_time, delta;
 int i, retries = 0;

@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
 if (!hrtimer_hres_active())
 return;

-td = &__get_cpu_var(tick_cpu_device);
+td = this_cpu_ptr(&tick_cpu_device);
 if (td && td->evtdev)
 hrtimer_interrupt(td->evtdev);
 }
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
 void hrtimer_run_queues(void)
 {
 struct timerqueue_node *node;
-struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 struct hrtimer_clock_base *base;
 int index, gettime = 1;

@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)

 local_irq_disable();
 old_base = &per_cpu(hrtimer_bases, scpu);
-new_base = &__get_cpu_var(hrtimer_bases);
+new_base = this_cpu_ptr(&hrtimer_bases);
 /*
 * The caller is globally serialized and nobody else
 * takes two locks at once, deadlock is not possible.
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 void tick_check_oneshot_broadcast_this_cpu(void)
 {
 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
-struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

 /*
 * We might be in the middle of switching over from
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,

 void tick_install_replacement(struct clock_event_device *newdev)
 {
-struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 int cpu = smp_processor_id();

 clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)

 void tick_suspend(void)
 {
-struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

 clockevents_shutdown(td->evtdev);
 }

 void tick_resume(void)
 {
-struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 int broadcast = tick_resume_broadcast();

 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 */
 int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 {
-struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 struct clock_event_device *dev = td->evtdev;

 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
 */
 void __tick_nohz_full_check(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

 if (tick_nohz_full_cpu(smp_processor_id())) {
 if (ts->tick_stopped && !is_idle_task(current)) {
@@ -573,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 ktime_t last_update, expires, ret = { .tv64 = 0 };
 unsigned long rcu_delta_jiffies;
-struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 u64 time_delta;

 time_delta = timekeeping_max_deferment();
@@ -841,7 +841,7 @@ void tick_nohz_idle_enter(void)

 local_irq_disable();

-ts = &__get_cpu_var(tick_cpu_sched);
+ts = this_cpu_ptr(&tick_cpu_sched);
 ts->inidle = 1;
 __tick_nohz_idle_enter(ts);

@@ -859,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
 */
 void tick_nohz_irq_exit(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

 if (ts->inidle)
 __tick_nohz_idle_enter(ts);
@@ -874,7 +874,7 @@ void tick_nohz_irq_exit(void)
 */
 ktime_t tick_nohz_get_sleep_length(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

 return ts->sleep_length;
 }
@@ -952,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 */
 void tick_nohz_idle_exit(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 ktime_t now;

 local_irq_disable();
@@ -987,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 */
 static void tick_nohz_handler(struct clock_event_device *dev)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 struct pt_regs *regs = get_irq_regs();
 ktime_t now = ktime_get();

@@ -1011,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 */
 static void tick_nohz_switch_to_nohz(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 ktime_t next;

 if (!tick_nohz_enabled)
@@ -1073,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)

 static inline void tick_nohz_irq_enter(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 ktime_t now;

 if (!ts->idle_active && !ts->tick_stopped)
@@ -1151,7 +1151,7 @@ early_param("skew_tick", skew_tick);
 */
 void tick_setup_sched_timer(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 ktime_t now = ktime_get();

 /*
@@ -1220,7 +1220,7 @@ void tick_clock_notify(void)
 */
 void tick_oneshot_notify(void)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

 set_bit(0, &ts->check_clocks);
 }
@@ -1235,7 +1235,7 @@ void tick_oneshot_notify(void)
 */
 int tick_check_oneshot_change(int allow_nohz)
 {
-struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

 if (!test_and_clear_bit(0, &ts->check_clocks))
 return 0;
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
 const char *name, struct lock_class_key *key)
 {
-struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+struct tvec_base *base = raw_cpu_read(tvec_bases);

 timer->entry.next = NULL;
 timer->base = (void *)((unsigned long)base | flags);
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
 void user_return_notifier_register(struct user_return_notifier *urn)
 {
 set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
-hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_register);

@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
 hlist_del(&urn->link);
-if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
 clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
@@ -208,7 +208,7 @@ void touch_nmi_watchdog(void)
 * case we shouldn't have to worry about the watchdog
 * going off.
 */
-__raw_get_cpu_var(watchdog_nmi_touch) = true;
+raw_cpu_write(watchdog_nmi_touch, true);
 touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -217,8 +217,8 @@ EXPORT_SYMBOL(touch_nmi_watchdog);

 void touch_softlockup_watchdog_sync(void)
 {
-__raw_get_cpu_var(softlockup_touch_sync) = true;
-__raw_get_cpu_var(watchdog_touch_ts) = 0;
+__this_cpu_write(softlockup_touch_sync, true);
+__this_cpu_write(watchdog_touch_ts, 0);
 }

 #ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -425,7 +425,7 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)

 static void watchdog_enable(unsigned int cpu)
 {
-struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

 /* kick off the timer for the hardlockup detector */
 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -445,7 +445,7 @@ static void watchdog_enable(unsigned int cpu)

 static void watchdog_disable(unsigned int cpu)
 {
-struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

 watchdog_set_prio(SCHED_NORMAL, 0);
 hrtimer_cancel(hrtimer);
@@ -585,7 +585,7 @@ static struct smp_hotplug_thread watchdog_threads = {

 static void restart_watchdog_hrtimer(void *info)
 {
-struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
 int ret;

 /*