BACKPORT: FROMGIT: KVM: arm64: Rework SVE host-save/guest-restore
In order to keep the code readable, move the host-save/guest-restore
sequences in their own functions, with the following changes:

- the hypervisor ZCR is now set from C code
- ZCR_EL2 is always used as the EL2 accessor

This results in some minor assembler macro rework.
No functional change intended.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
(cherry picked from commit 52029198c1cec1e21513d74f87363a0408f28650
 git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git next)
[will: Fixed trivial context conflict with host stage-2 series in switch.h]
Signed-off-by: Will Deacon <willdeacon@google.com>

Bug: 178098380
Test: atest VirtualizationHostTestCases on an EL2-enabled device
Change-Id: Ibcacce83e2eb142ae5c5672b534def83892060bc
committed by Quentin Perret
parent 57171b3636
commit 1105b4d1ce
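At a glance, the change replaces the open-coded SVE sequences in
__hyp_handle_fpsimd() with two helpers. A condensed sketch of the resulting
trap-handler flow, assembled from the switch.h hunks below (surrounding
logic elided; not verbatim patch text):

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		if (sve_host)
			__hyp_sve_save_host(vcpu);	/* new helper */
		else
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);	/* new helper: ZCR_EL2 set from C */
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);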
arch/arm64/include/asm/fpsimdmacros.h
@@ -232,8 +232,7 @@
 		str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
-		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+.macro __sve_load nxbase, xpfpsr, nxtmp
 _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
 		_sve_ldr_p	0, \nxbase
 		_sve_wrffr	0
@@ -244,3 +243,8 @@
 		ldr		w\nxtmp, [\xpfpsr, #4]
 		msr		fpcr, x\nxtmp
 .endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
+		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+		__sve_load	\nxbase, \xpfpsr, \nxtmp
+.endm
arch/arm64/include/asm/kvm_hyp.h
@@ -91,7 +91,7 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 void __sve_save_state(void *sve_pffr, u32 *fpsr);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
arch/arm64/kvm/hyp/fpsimd.S
@@ -21,7 +21,7 @@ SYM_FUNC_START(__fpsimd_restore_state)
 SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
-	sve_load 0, x1, x2, 3, x4
+	__sve_load 0, x1, 2
 	ret
 SYM_FUNC_END(__sve_restore_state)
 
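Reading the new operands against the macro arguments in the fpsimdmacros.h
hunks above: with the vector-length handling hoisted out to C, x2 no longer
carries vq and is free to serve as the scratch register. An annotated
sketch (my reading, not patch text):

	__sve_load 0, x1, 2
	/* nxbase = 0  -> Z/P/FFR loads are based on x0 (the sve_pffr pointer) */
	/* xpfpsr = x1 -> address of the saved fpsr/fpcr pair                  */
	/* nxtmp  = 2  -> x2 is now scratch; the old x2 (vq) and x4 inputs     */
	/*               of sve_load are gone                                  */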
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -201,6 +201,24 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return __get_fault_info(esr, &vcpu->arch.fault);
 }
 
+static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
+{
+	struct thread_struct *thread;
+
+	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
+			      uw.fpsimd_state);
+
+	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
+}
+
+static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
+{
+	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+	__sve_restore_state(vcpu_sve_pffr(vcpu),
+			    &vcpu->arch.ctxt.fp_regs.fpsr);
+	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+}
+
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
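One ordering point worth noting in __hyp_sve_restore_guest() above: the EL2
vector length has to be wide enough before the bulk register loads run.
Sketched as comments (my reading of the sequence, not patch text):

	/* 1. sve_cond_update_zcr_vq(..., SYS_ZCR_EL2): widen the EL2 VL so  */
	/*    the full guest Z/P/FFR image fits the loads that follow        */
	/* 2. __sve_restore_state(): bulk data move, now vq-agnostic         */
	/* 3. write_sysreg_el1(..., SYS_ZCR): the guest's own ZCR_EL1 view   */
	/*    is restored last, independent of the EL2 setting               */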
@@ -256,28 +274,18 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		 * In the SVE case, VHE is assumed: it is enforced by
 		 * Kconfig and kvm_arch_init().
 		 */
-		if (sve_host) {
-			struct thread_struct *thread = container_of(
-				vcpu->arch.host_fpsimd_state,
-				struct thread_struct, uw.fpsimd_state);
-
-			__sve_save_state(sve_pffr(thread),
-					 &vcpu->arch.host_fpsimd_state->fpsr);
-		} else {
+		if (sve_host)
+			__hyp_sve_save_host(vcpu);
+		else
 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-		}
 
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
 
-	if (sve_guest) {
-		__sve_restore_state(vcpu_sve_pffr(vcpu),
-				    &vcpu->arch.ctxt.fp_regs.fpsr,
-				    vcpu_sve_vq(vcpu) - 1);
-		write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
-	} else {
+	if (sve_guest)
+		__hyp_sve_restore_guest(vcpu);
+	else
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
-	}
 
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))