tlb.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>
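
/*
 * Per-invalidation context saved by enter_vmid_context() and restored by
 * exit_vmid_context(): the stage-2 MMU to switch back to on exit (NULL if
 * no switch was needed), plus the EL1 TCR/SCTLR values stashed while the
 * ARM64_WORKAROUND_SPECULATIVE_AT workaround is applied.
 */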
struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;
	u64			tcr;
	u64			sctlr;
};
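
/*
 * Switch the stage-2 context loaded in VTTBR_EL2 to that of @mmu,
 * recording in @cxt whatever must be restored afterwards. If @mmu is
 * already the live context, @cxt->mmu is left NULL and the matching
 * exit_vmid_context() becomes a no-op. On CPUs affected by
 * ARM64_WORKAROUND_SPECULATIVE_AT, stage-1 walks are also masked via the
 * TCR_EL1 EPD bits while the VMID is switched.
 */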
static void enter_vmid_context(struct kvm_s2_mmu *mmu,
			       struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;
	cxt->mmu = NULL;

	/*
	 * If we're already in the desired context, then there's nothing
	 * to do.
	 */
	if (vcpu) {
		/* We're in guest context */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;

		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* We're in host context */
		if (mmu == host_s2_mmu)
			return;

		cxt->mmu = host_s2_mmu;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a Stage-1 walk with the old VMID while we have
		 * the new VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the host S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill. For guests, we ensure that the S1 MMU is
		 * temporarily enabled in the next context.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();

		if (vcpu) {
			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
			if (!(val & SCTLR_ELx_M)) {
				val |= SCTLR_ELx_M;
				write_sysreg_el1(val, SYS_SCTLR);
				isb();
			}
		} else {
			/* The host S1 MMU is always enabled. */
			cxt->sctlr = SCTLR_ELx_M;
		}
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	if (vcpu)
		__load_host_stage2();
	else
		__load_stage2(mmu, kern_hyp_va(mmu->arch));

	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
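
/*
 * Undo enter_vmid_context(): reinstate the stage-2 context recorded in
 * @cxt->mmu (if any) and, when the SPECULATIVE_AT workaround is in use,
 * restore the saved SCTLR_EL1 and TCR_EL1 values.
 */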
static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *mmu = cxt->mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (!mmu)
		return;

	if (vcpu)
		__load_stage2(mmu, kern_hyp_va(mmu->arch));
	else
		__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the old VMID */
		isb();

		if (!(cxt->sctlr & SCTLR_ELx_M)) {
			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
			isb();
		}

		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}

	cxt->mmu = NULL;
}
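
/*
 * The flush entry points below all follow the same shape: complete any
 * pending page-table updates, switch to the VMID of the requested
 * stage-2 MMU, issue the TLBI(s), wait for completion, and switch back.
 * A minimal sketch of that pattern, with a hypothetical helper name used
 * purely for illustration:
 *
 *	static void example_flush(struct kvm_s2_mmu *mmu)
 *	{
 *		struct tlb_inv_context cxt;
 *
 *		dsb(ishst);
 *		enter_vmid_context(mmu, &cxt);
 *		__tlbi(vmalls12e1is);
 *		dsb(ish);
 *		isb();
 *		exit_vmid_context(&cxt);
 *	}
 */

/*
 * Invalidate the stage-2 TLB entry for @ipa at table @level under @mmu's
 * VMID, plus the whole of stage-1 for that VMID (see the comments in the
 * body for why).
 */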
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;	/* the TLBI operand encodes the IPA as a page number */
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	exit_vmid_context(&cxt);
}
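
/*
 * Invalidate all stage-1 and stage-2 TLB entries for @mmu's VMID across
 * the Inner Shareable domain.
 */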
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}
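
/*
 * Local (non-broadcast) flush of this CPU's TLB and instruction cache,
 * performed under @mmu's VMID: TLBI VMALLE1 and IC IALLU only affect the
 * executing CPU, hence the dsb(nsh) rather than an Inner Shareable barrier.
 */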
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}
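
/*
 * Invalidate TLB entries for all VMIDs (TLBI ALLE1IS) across the Inner
 * Shareable domain, as needed on a VMID rollover; the comment in the body
 * covers the extra I-cache work required for VPIPT.
 */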
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}