FROMGIT: KVM: arm64: Provide KVM's own save/restore SVE primitives
As we are about to change the way KVM deals with SVE, provide KVM with its own save/restore SVE primitives.

No functional change intended.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
(cherry picked from commit 297b8603e356ad82c1345cc75fad4d89310a3c34
 git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git next)
Signed-off-by: Will Deacon <willdeacon@google.com>
Bug: 178098380
Test: atest VirtualizationHostTestCases on an EL2-enabled device
Change-Id: I77414def3e06f71324eec25f5a204d04e6c5c328
This commit is contained in:

committed by
Quentin Perret

parent
af855ca1c9
commit
f5e060d65e
@@ -6,6 +6,8 @@
  * Author: Catalin Marinas <catalin.marinas@arm.com>
  */
 
+#include <asm/assembler.h>
+
 .macro fpsimd_save state, tmpnr
 	stp	q0, q1, [\state, #16 * 0]
 	stp	q2, q3, [\state, #16 * 2]
@@ -90,6 +90,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
 	fpsimd_restore	x0, 1
 	ret
 SYM_FUNC_END(__fpsimd_restore_state)
+
+SYM_FUNC_START(__sve_restore_state)
+	sve_load 0, x1, x2, 3, x4
+	ret
+SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+	sve_save 0, x1, 2
+	ret
+SYM_FUNC_END(__sve_save_state)
@@ -261,7 +261,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 					      vcpu->arch.host_fpsimd_state,
 					      struct thread_struct, uw.fpsimd_state);
 
-		sve_save_state(sve_pffr(thread),
+		__sve_save_state(sve_pffr(thread),
 			       &vcpu->arch.host_fpsimd_state->fpsr);
 	} else {
 		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
@@ -271,7 +271,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	}
 
 	if (sve_guest) {
-		sve_load_state(vcpu_sve_pffr(vcpu),
+		__sve_restore_state(vcpu_sve_pffr(vcpu),
 			       &vcpu->arch.ctxt.fp_regs.fpsr,
 			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
|
Reference in New Issue
Block a user