book3s_mmu_hpte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

#define PTE_SIZE	12
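
/*
 * PTE_SIZE is the 4k page shift (log2 of 4096): kvmppc_mmu_hash_pte()
 * below hashes the effective page number, eaddr >> 12, so all addresses
 * within one shadow page land in the same bucket.
 */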
static struct kmem_cache *hpte_cache;

static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif
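
/*
 * Each cached HPTE is linked into several hash lists at once: two keyed
 * on the effective address and two (three with CONFIG_PPC_BOOK3S_64) on
 * the virtual page, at different granularities.  The flush paths below
 * can therefore walk one small bucket instead of the whole cache.
 */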
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}
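
/*
 * Readers traverse the hash lists under rcu_read_lock() only, so two
 * CPUs may pick the same entry to invalidate concurrently.  The
 * hlist_unhashed() check under mmu_lock ensures the entry is unlinked
 * and queued for freeing exactly once.
 */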
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
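
/*
 * ea_mask selects the flush granularity.  Hypothetical call sites, for
 * illustration only (the real callers live elsewhere in the Book3S PR
 * MMU code):
 *
 *	kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL);	// one 4k page
 *	kvmppc_mmu_pte_flush(vcpu, ea, 0x0ffff000);	// flush_long path
 *	kvmppc_mmu_pte_flush(vcpu, 0, 0);		// drop everything
 */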
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);

	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}
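
/*
 * There is no hash keyed on guest physical address, so a physical-range
 * flush scans every vPTE-long bucket and compares each entry's raddr
 * against [pa_start, pa_end).
 */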
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
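
/*
 * Note the simple eviction policy: there is no LRU.  Once the cache has
 * filled up to HPTEG_CACHE_NUM entries it is flushed wholesale and then
 * repopulated on demand.
 */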
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}
static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}
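
/*
 * Expected lifecycle, sketched from the function names (the actual call
 * sites are elsewhere in the Book3S PR code): kvmppc_mmu_hpte_sysinit()
 * once at module load, kvmppc_mmu_hpte_init() per vCPU,
 * kvmppc_mmu_hpte_destroy() on vCPU teardown, and
 * kvmppc_mmu_hpte_sysexit() at module unload.
 */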