UPSTREAM: arm64: vdso: Avoid ISB after reading from cntvct_el0

commit 77ec462536a13d4b428a1eead725c4818a49f0b1 upstream.
(The upstream patch does not carry a Fixes: tag, but it addresses a
performance regression introduced by
commit 28b1a824a4 ("arm64: vdso: Substitute gettimeofday() with C implementation").
sysbench memory comparison:
- Before: 3072.00 MB transferred (2601.11 MB/sec)
- After:  3072.00 MB transferred (3217.86 MB/sec)
)
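The exact sysbench invocation is not recorded here; assuming the stock
sysbench memory test sized to match the 3072 MB transferred above, a
plausible reproduction would be:

  sysbench memory --memory-total-size=3G run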

We can avoid the expensive ISB instruction after reading the counter in
the vDSO gettime functions by creating a fake address hazard against a
dummy stack read, just like we do inside the kernel.
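
To illustrate the trick outside the kernel, here is a hedged user-space
sketch (read_cntvct_ordered() is an illustrative name, not a kernel
API): the counter value is XORed with itself to yield zero, the zero is
added to the stack pointer, and a dummy load is issued from the
resulting address. Since the load's address depends on the counter
value, any later barrier that orders the load also orders the counter
read, without paying for a full isb().

/* User-space sketch (AArch64 Linux) of the fake address hazard. */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct_ordered(void)
{
	uint64_t cnt, tmp;

	asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
	asm volatile(
	"	eor	%0, %1, %1\n"	/* tmp = cnt ^ cnt = 0, dependent on cnt */
	"	add	%0, sp, %0\n"	/* tmp = sp + 0: a valid, dependent address */
	"	ldr	xzr, [%0]"	/* dummy load; result discarded */
	: "=r" (tmp) : "r" (cnt));
	return cnt;
}

int main(void)
{
	printf("cntvct_el0 = %llu\n",
	       (unsigned long long)read_cntvct_ordered());
	return 0;
}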

Bug: 195968646
Fixes: 28b1a824a4 ("arm64: vdso: Substitute gettimeofday() with C implementation")
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/20210318170738.7756-5-will@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
CC: stable@vger.kernel.org
(cherry picked from commit 77ec462536a13d4b428a1eead725c4818a49f0b1)
Signed-off-by: Chanho Park <chanho61.park@samsung.com>
Change-Id: I891873626c27060e7ead724754096a7c5f59e4e6
 arch/arm64/include/asm/arch_timer.h        | 21 ---------------------
 arch/arm64/include/asm/barrier.h           | 19 +++++++++++++++++++
 arch/arm64/include/asm/vdso/gettimeofday.h |  6 +-----
 3 files changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h

@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	isb();
 }
 
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do {			\
-	u64 tmp, _val = (val);						\
-									\
-	asm volatile(							\
-	"	eor	%0, %1, %1\n"					\
-	"	add	%0, sp, %0\n"					\
-	"	ldr	xzr, [%0]"					\
-	: "=r" (tmp) : "r" (_val));					\
-} while (0)
-
 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
 	return cnt;
 }
 
-#undef arch_counter_enforce_ordering
-
 static inline int arch_timer_arch_init(void)
 {
 	return 0;

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h

@@ -71,6 +71,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 	return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {			\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 #define __smp_mb()	dmb(ish)
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)

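With the macro now in asm/barrier.h, any counter read can be given
load-like ordering. A hedged usage sketch (not taken from the patch):

	u64 cnt;

	asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
	arch_counter_enforce_ordering(cnt);
	/* cnt is now ordered like a memory read by later barriers */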
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h

@@ -83,11 +83,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 	 */
 	isb();
 	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
-	/*
-	 * This isb() is required to prevent that the seq lock is
-	 * speculated.#
-	 */
-	isb();
+	arch_counter_enforce_ordering(res);
 
 	return res;
 }
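
For context on why this ordering is needed: the generic vDSO code in
lib/vdso/gettimeofday.c reads the counter inside a seqcount retry loop,
and a speculated counter read could otherwise escape the loop and be
paired with stale conversion data. A simplified sketch of that loop
(field and helper names abbreviated from the generic code):

	do {
		seq = vdso_read_begin(vd);	/* snapshot sequence count */
		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		ns = vdso_ts->nsec;
		ns += (cycles - vd->cycle_last) * vd->mult;
		ns >>= vd->shift;
	} while (unlikely(vdso_read_retry(vd, seq)));	/* smp_rmb() + re-check */

Because arch_counter_enforce_ordering() makes the mrs behave like a
load, the smp_rmb() in vdso_read_retry() is enough to keep the counter
read inside the critical section, and the second isb() can go.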