tlb.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>
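
/*
 * When the Svinval extension is present, the range loops below issue
 * per-entry HINVAL_GVMA/HINVAL_VVMA invalidations and order the whole
 * batch once with an SFENCE.W.INVAL/SFENCE.INVAL.IR pair, instead of
 * paying the ordering cost of a full HFENCE on every iteration.
 */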
#define has_svinval()	\
	static_branch_unlikely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_SVINVAL])
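
/*
 * Flush G-stage (guest-physical) TLB entries for @vmid covering the range
 * [@gpa, @gpa + @gpsz), stepping by BIT(@order) bytes.  A range spanning
 * more than PTRS_PER_PTE steps falls back to a full flush of the VMID.
 * The address operand of HFENCE.GVMA/HINVAL.GVMA is a guest physical
 * address shifted right by two bits, hence "pos >> 2".
 */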
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}
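
/*
 * Flush VS-stage (guest-virtual) TLB entries for @asid within @vmid over the
 * range [@gva, @gva + @gvsz).  HFENCE.VVMA/HINVAL.VVMA act on the VMID
 * currently held in hgatp, so the target VMID is temporarily swapped into
 * CSR_HGATP and the previous value is restored afterwards.
 */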
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
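
/*
 * The *_process() helpers below run on a target VCPU to service the
 * KVM_REQ_FENCE_I, KVM_REQ_HFENCE_GVMA_VMID_ALL, KVM_REQ_HFENCE_VVMA_ALL
 * and KVM_REQ_HFENCE requests raised by make_xfence_request() further down.
 */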
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}
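
/*
 * Each VCPU carries a fixed-size ring buffer of pending hfence requests
 * (KVM_RISCV_VCPU_MAX_HFENCE slots, protected by hfence_lock).  A slot whose
 * type is zero is free: dequeue pops from hfence_head, enqueue pushes at
 * hfence_tail, and both indices wrap around at the end of the array.
 */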
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}
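
/*
 * Drain this VCPU's hfence queue and carry out each request locally using
 * the VM's current VMID.
 */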
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}
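
/*
 * Build the set of target VCPUs from (@hbase, @hmask), where @hbase == -1UL
 * means "all VCPUs", optionally enqueue @data on each of them, and then kick
 * the whole set with @req.  If any targeted VCPU's hfence queue is full, the
 * more conservative @fallback_req is sent to the set instead.
 */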
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}
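
/*
 * Illustrative sketch only (not part of the upstream file): a hypothetical
 * caller that has just unmapped one 4 KiB guest-physical page could flush
 * the matching G-stage TLB entries on every VCPU of the VM like this, where
 * "gpa" is assumed to be the page-aligned guest physical address:
 *
 *	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, PAGE_SIZE, PAGE_SHIFT);
 *
 * Passing hbase == -1UL selects all VCPUs (see make_xfence_request()), and
 * order == PAGE_SHIFT makes the range loop take a single step.
 */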