core: Replace __get_cpu_var with __this_cpu_read if not used for an address.

__get_cpu_var() can be replaced with __this_cpu_read() and will then use a
single read instruction with implied address calculation to access the
correct per cpu instance.

However, the address of a per cpu variable passed to __this_cpu_read()
cannot be determined (since it's an implied address conversion through
segment prefixes). Therefore apply this only to uses of __get_cpu_var
where the address of the variable is not used.

Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hughd@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:

committed by
Tejun Heo

parent
780f36d8b3
commit
909ea96468
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
 
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
 
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
 			/*
 			 * If the time stamp was touched atomically
 			 * make sure the scheduler tick is up to date.
 			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }
Reference in New Issue
Block a user