tlb.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
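
/*
 * State saved around a TLB invalidation: the caller's interrupt flags,
 * plus the TCR_EL1/SCTLR_EL1 values stashed by the SPECULATIVE_AT
 * workaround below so they can be restored on the way out.
 */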
struct tlb_inv_context {
        unsigned long   flags;
        u64             tcr;
        u64             sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                  struct tlb_inv_context *cxt)
{
        u64 val;

        local_irq_save(cxt->flags);

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /*
                 * For CPUs that are affected by ARM errata 1165522 or 1530923,
                 * we cannot trust stage-1 to be in a correct state at this
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLB entries. This is done by setting the
                 * EPD bits in the TCR_EL1 register. We also need to prevent
                 * it from allocating IPA->PA walks, so we enable the S1 MMU...
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                val |= SCTLR_ELx_M;
                write_sysreg_el1(val, SYS_SCTLR);
        }

        /*
         * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
         * most TLB operations target EL2/EL0. In order to affect the
         * guest TLBs (EL1/EL0), we need to change one of these two
         * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
         * let's flip TGE before executing the TLB operation.
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
         * place before clearing TGE. __load_stage2() already
         * has an ISB in order to deal with this.
         */
        __load_stage2(mmu, mmu->arch);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
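        /*
         * Synchronize the HCR_EL2.TGE change (and the stage-2 load above)
         * so that the TLB operations issued by our callers see the guest
         * context in place.
         */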
        isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
        /*
         * We're done with the TLB operation: detach the guest's stage-2
         * context and restore the host's view of HCR_EL2.
         */
        write_sysreg(0, vttbr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
        }

        local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
                              phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

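        /*
         * Make any page-table updates published by the caller visible to
         * the table walkers before the invalidation starts.
         */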
        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
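        /*
         * TLBI IPAS2E1IS takes the IPA in page-number form, hence the
         * shift by 12; __tlbi_level() additionally encodes the level hint
         * in the TTL field where the CPU supports it.
         */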
        ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

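        /*
         * TLBI VMALLS12E1IS drops all stage-1 and stage-2 entries for the
         * current VMID across the Inner Shareable domain.
         */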
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

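        /*
         * Local-only maintenance: TLBI VMALLE1 invalidates this CPU's
         * stage-1 entries for the current VMID, IC IALLU invalidates the
         * local instruction cache, and dsb(nsh) only waits for completion
         * on this PE.
         */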
        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        __tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
        dsb(ishst);
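        /*
         * TLBI ALLE1IS invalidates all EL1&0 TLB entries, for all VMIDs,
         * across the Inner Shareable domain, as required across a VMID
         * rollover.
         */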
        __tlbi(alle1is);

        /*
         * VIPT and PIPT caches are not affected by VMID, so no maintenance
         * is necessary across a VMID rollover.
         *
         * VPIPT caches constrain lookup and maintenance to the active VMID,
         * so we need to invalidate lines with a stale VMID to avoid an ABA
         * race after multiple rollovers.
         */
        if (icache_is_vpipt())
                asm volatile("ic ialluis");

        dsb(ish);
}