load_unaligned_zeropad() and __get/put_kernel_nofault() functions can read past some buffer limits which may include some MTE granule with a different tag. When MTE async mode is enabled, if the load operation crosses the boundaries and the next granule has a different tag, the PE sets the TFSR_EL1.TF1 bit as if an asynchronous tag fault has happened. Enable Tag Check Override (TCO) in these functions before the load and disable it afterwards to prevent this from happening. Note: The same condition can be hit in MTE sync mode but we deal with it through the exception handling. In the current implementation, the mte_async_mode flag is set only at boot time, but in future kasan might acquire some runtime features that change the mode dynamically, hence we disable it when sync mode is selected, to be future-proof. Cc: Will Deacon <will@kernel.org> Reported-by: Branislav Rankov <Branislav.Rankov@arm.com> Tested-by: Branislav Rankov <Branislav.Rankov@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Andrey Konovalov <andreyknvl@google.com> Tested-by: Andrey Konovalov <andreyknvl@google.com> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com> Link: https://lore.kernel.org/r/20210315132019.33202-6-vincenzo.frascino@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Bug: 170327579 Bug: 172318110 (cherry picked from commit e60beb95c08baf29416d0e06a9e1d4887faf5d1c git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/ for-next/mte-async-kernel-mode) Signed-off-by: Alexander Potapenko <glider@google.com> Change-Id: Id1bf44c5f41bd6de4b2ddf286939cd35712f2299
87 lines
1.8 KiB
C
87 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2013 ARM Ltd.
|
|
*/
|
|
#ifndef __ASM_WORD_AT_A_TIME_H
|
|
#define __ASM_WORD_AT_A_TIME_H
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#ifndef __AARCH64EB__
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
/*
 * Pre-computed per-word constants for the zero-byte search:
 * one_bits has 0x01 in every byte, high_bits has 0x80 in every byte.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/*
 * Report whether the word 'a' contains a zero byte.
 *
 * Uses the classic (x - 0x01..01) & ~x & 0x80..80 trick: the high bit of
 * a byte ends up set only if that byte borrowed all the way to zero.
 * The raw bit pattern is stored through 'bits' for the later
 * prep_zero_mask()/create_zero_mask() steps, and also returned so the
 * caller can test it for truth.
 */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
				     const struct word_at_a_time *c)
{
	unsigned long found = (a - c->one_bits) & ~a & c->high_bits;

	*bits = found;
	return found;
}
|
|
|
|
/* Little-endian: the has_zero() pattern needs no per-byte fixup. */
#define prep_zero_mask(a, bits, c) (bits)

/*
 * Turn the has_zero() result into a mask whose low bytes (those before
 * the first zero byte) are all 0xff.  (x - 1) & ~x isolates every bit
 * below the lowest set bit; shifting right by 7 moves the pattern down
 * so each affected byte becomes 0xff.
 */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	unsigned long below = (bits - 1) & ~bits;

	return below >> 7;
}
|
|
|
|
/*
 * Return the byte index of the first zero byte, given the mask produced
 * by create_zero_mask(): the index of its highest set bit, divided by 8.
 */
static inline unsigned long find_zero(unsigned long mask)
{
	unsigned long bitpos = fls64(mask);

	return bitpos >> 3;
}

/* The create_zero_mask() output already is the byte mask. */
#define zero_bytemask(mask) (mask)
|
|
|
|
#else /* __AARCH64EB__ */
|
|
#include <asm-generic/word-at-a-time.h>
|
|
#endif
|
|
|
|
/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, tmp;

	/*
	 * The unaligned load below may read past the end of the buffer and
	 * into an adjacent MTE granule carrying a different tag.  Suppress
	 * asynchronous tag-check faults around the access so the benign
	 * over-read does not get recorded as a tag fault.
	 * NOTE(review): assumes __uaccess_enable/disable_tco_async() are
	 * no-ops unless MTE async mode is in use -- confirm in asm/uaccess.h.
	 */
	__uaccess_enable_tco_async();

	/* Load word from unaligned pointer addr */
	asm(
	"1:	ldr	%0, %3\n"
	"2:\n"
	/*
	 * Fault fixup path: redo the load aligned down to 8 bytes (which
	 * cannot cross the page boundary), then shift the valid part into
	 * place so the bytes from the unmapped page read as zero.
	 * %2 = addr, %1 = scratch (aligned address, then shift amount).
	 */
	"	.pushsection .fixup,\"ax\"\n"
	"	.align 2\n"
	"3:	bic	%1, %2, #0x7\n"
	"	ldr	%0, [%1]\n"
	"	and	%1, %2, #0x7\n"
	"	lsl	%1, %1, #0x3\n"
#ifndef __AARCH64EB__
	"	lsr	%0, %0, %1\n"
#else
	"	lsl	%0, %0, %1\n"
#endif
	"	b	2b\n"
	"	.popsection\n"
	_ASM_EXTABLE(1b, 3b)
	: "=&r" (ret), "=&r" (tmp)
	: "r" (addr), "Q" (*(unsigned long *)addr));

	/* Re-enable async tag checking now that the racy load is done. */
	__uaccess_disable_tco_async();

	return ret;
}
|
|
|
|
#endif /* __ASM_WORD_AT_A_TIME_H */
|