/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <[email protected]>
 *         Scott Wood <[email protected]>
 *         Ashish Kalra <[email protected]>
 *         Varun Sethi <[email protected]>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <[email protected]>,
 * Copyright IBM Corp. 2007-2008
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/nohash/mmu-e500.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>

enum vcpu_ftr {
	VCPU_FTR_MMU_V2
};

#define E500_PID_NUM	3
#define E500_TLB_NUM	2

/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1 << 29)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR	(0x7f)

struct tlbe_ref {
	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};
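
/*
 * The E500_TLB_MAS2_ATTR mask above suggests the low seven bits of
 * tlbe_ref.flags carry the mapping's MAS2 X0/X1 and WIMGE attributes,
 * with the E500_TLB_* state bits kept in the high bits; see
 * e500_mmu_host.c for how the field is actually filled in.
 */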

struct tlbe_priv {
	struct tlbe_ref ref;
};

#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	unsigned int host_tlb1_nv;

	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum address mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};

static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}

/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE	(KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE	16

#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)
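
/*
 * Example (follows directly from the macros above): index_of(1, 5)
 * packs TLB1, entry 5 into 0x10005, and tlbsel_of()/esel_of() recover
 * the two halves again.
 */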

#define E500_TLB_USER_PERM_MASK	(MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK	(MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif

/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & MAS2_EPN;
}

static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);

	return 1ULL << 10 << pgsize;
}
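
/*
 * That is, the mapping covers 1 KiB shifted left by the TSIZE field:
 * e.g. a TSIZE of 2 gives 4 KiB and a TSIZE of 10 gives 1 MiB.
 */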

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);

	return get_tlb_eaddr(tlbe) + bytes - 1;
}

static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}

static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}

static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says that tlbsel is 2 bits wide.
	 * Since we only have two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}

static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}

static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
				    const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];

	return &vcpu_e500->gtlb_arch[offset + entry];
}
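
/*
 * Both guest TLBs live back to back in the flat gtlb_arch[] array and
 * gtlb_offset[] records where each one starts, so with the legacy
 * default geometry get_entry(vcpu_e500, 1, 0) would be element
 * KVM_E500_TLB0_SIZE of the array.
 */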

void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_BOOKE_HV
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)	get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)			get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)			(gtlbe->mas1 & MAS1_TS)

/*
 * These functions should be called with preemption disabled;
 * the returned value is valid only in that context.
 */
static inline int get_thread_specific_lpid(int vm_lpid)
{
	int vcpu_lpid = vm_lpid;

	if (threads_per_core == 2)
		vcpu_lpid |= smp_processor_id() & 1;

	return vcpu_lpid;
}
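
/*
 * For example, with two threads per core a VM using (even) LPID 2 runs
 * with LPID 2 on an even-numbered CPU and LPID 3 on an odd-numbered
 * one, so each hardware thread gets its own logical partition ID.
 */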

static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}
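
/*
 * The selector extracted above is the MAS4[TIDSELD] field, which picks
 * which of the guest's PID registers (pid[0]..pid[2]) supplies the
 * default TID on a TLB miss.
 */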

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)	(MAS1_TS)

#endif /* !BOOKE_HV */

static inline bool has_feature(const struct kvm_vcpu *vcpu,
			       enum vcpu_ftr ftr)
{
	bool has_ftr;

	switch (ftr) {
	case VCPU_FTR_MMU_V2:
		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
		break;
	default:
		return false;
	}

	return has_ftr;
}

#endif /* KVM_E500_H */