arm64: KVM: Dynamically patch the kernel/hyp VA mask
So far, we've been using a complicated sequence of alternatives to
patch the kernel/hyp VA mask on non-VHE, and to NOP out the masking
altogether on VHE.

The newly introduced dynamic patching gives us the opportunity to
simplify that code by patching a single instruction with the correct
mask (instead of the mind-bending cumulative masking we have at the
moment), or even a single NOP on VHE. This also adds some initial
code that will allow the patching callback to switch to a more
complex patching scheme.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
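For context, the callback named at the new alternative_cb site is added by this patch in arch/arm64/kvm/va_layout.c, which is not shown in the hunks below. What follows is a condensed sketch of its logic, using the arm64 insn API and the simple initial mask computation this series starts with; treat it as an illustration rather than the verbatim source:

#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

static u64 va_mask;

static void compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Pick the half of the VA space not containing the idmap. */
	hyp_va_msb  = idmap_addr & BIT(VA_BITS - 1);
	hyp_va_msb ^= BIT(VA_BITS - 1);

	va_mask  = GENMASK_ULL(VA_BITS - 2, 0);
	va_mask |= hyp_va_msb;
}

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 rd, rn, insn, oinsn;

	/* The kern_hyp_va sequence is now a single instruction. */
	BUG_ON(nr_inst != 1);

	/* VHE runs the kernel at EL2: no translation needed, NOP it out. */
	if (has_vhe()) {
		updptr[0] = cpu_to_le32(aarch64_insn_gen_nop());
		return;
	}

	if (!va_mask)
		compute_layout();

	/* Reuse the registers encoded in the reserved "and Rd, Rn, #1". */
	oinsn = le32_to_cpu(origptr[0]);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
	rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

	/* Emit "and Rd, Rn, #va_mask" in place of the placeholder. */
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
						  AARCH64_INSN_VARIANT_64BIT,
						  rn, rd, va_mask);
	BUG_ON(insn == AARCH64_BREAK_FAULT);

	updptr[0] = cpu_to_le32(insn);
}

Because the placeholder "and \reg, \reg, #1" already encodes the destination and source registers, the callback only has to decode them and splice in the computed immediate, which is what makes a single-instruction sequence sufficient.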
arch/arm64/include/asm/kvm_mmu.h

@@ -69,9 +69,6 @@
  * mappings, and none of this applies in that case.
  */

-#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
-#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
-
 #ifdef __ASSEMBLY__

 #include <asm/alternative.h>
@@ -81,28 +78,15 @@
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  *
- * This generates the following sequences:
- * - High mask:
- *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
- *		nop
- * - Low mask:
- *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
- *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
- * - VHE:
- *		nop
- *		nop
- *
- * The "low mask" version works because the mask is a strict subset of
- * the "high mask", hence performing the first mask for nothing.
- * Should be completely invisible on any viable CPU.
+ * The actual code generation takes place in kvm_update_va_mask, and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_update_va_mask uses the
+ * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va	reg
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
-alternative_else_nop_endif
-alternative_if ARM64_HYP_OFFSET_LOW
-	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
-alternative_else_nop_endif
+alternative_cb kvm_update_va_mask
+	and	\reg, \reg, #1
+alternative_cb_end
 .endm

 #else
@@ -113,18 +97,14 @@ alternative_else_nop_endif
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>

+void kvm_update_va_mask(struct alt_instr *alt,
+			__le32 *origptr, __le32 *updptr, int nr_inst);
+
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
-	asm volatile(ALTERNATIVE("and %0, %0, %1",
-				 "nop",
-				 ARM64_HAS_VIRT_HOST_EXTN)
-		     : "+r" (v)
-		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
-	asm volatile(ALTERNATIVE("nop",
-				 "and %0, %0, %1",
-				 ARM64_HYP_OFFSET_LOW)
-		     : "+r" (v)
-		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
+	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n",
+				    kvm_update_va_mask)
+		     : "+r" (v));
 	return v;
 }
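As a usage sketch (a hypothetical caller, not part of this patch): non-VHE hyp code converts kernel pointers with the kern_hyp_va() wrapper before dereferencing them, and after this change each conversion boils down to the one patched instruction:

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

/*
 * Hypothetical example of a function running at EL2 on non-VHE:
 * kern_hyp_va() wraps __kern_hyp_va() with the appropriate casts,
 * so the whole translation is the single "and" rewritten by the
 * callback (or a NOP on VHE, where kernel and hyp VAs coincide).
 */
static void __hyp_text example(struct kvm_vcpu *vcpu)
{
	vcpu = kern_hyp_va(vcpu);
	/* ... vcpu now points into the hyp mapping ... */
}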