/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <[email protected]>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong. go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};
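/*
 * Illustrative only (not part of the original header): a hypothetical caller
 * of the emulation routines would typically dispatch on these codes along the
 * following lines; emulate_one() is a made-up name and RESUME_* are the usual
 * KVM PPC resume codes.
 *
 *	switch (emulate_one(vcpu)) {
 *	case EMULATE_DONE:	r = RESUME_GUEST;	break;
 *	case EMULATE_AGAIN:	r = RESUME_GUEST;	break;	// retry the instruction
 *	case EMULATE_DO_MMIO:	r = RESUME_HOST;	break;	// finish MMIO in user space
 *	case EMULATE_EXIT_USER:	r = RESUME_HOST;	break;
 *	case EMULATE_FAIL:	r = RESUME_HOST;	break;	// report / inject program check
 *	}
 */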
enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)				\
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,	\
				(stt)->size, (ioba), (npages)) ?	\
				H_PARAMETER : H_SUCCESS)
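/*
 * Illustrative use of kvmppc_ioba_validate() (sketch only, hypothetical
 * H_PUT_TCE-style handler): validate the I/O bus address range against the
 * TCE table window before touching the table.
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;	// H_PARAMETER: ioba/npages outside the window
 */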
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);

extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				       enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
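/*
 * Example (illustrative, not from the original header): the two helpers above
 * use IBM bit numbering across a 64-bit value, so bit 0 is the most
 * significant bit and the shift is taken from bit 63.  Assuming a 32-bit
 * instruction word held in the low half of the u64, instruction bits 6..10
 * (the RT field) sit at positions 38..42:
 *
 *	u32 rt    = kvmppc_get_field(inst, 6 + 32, 10 + 32);		// extract RT
 *	u32 inst2 = kvmppc_set_field(inst, 6 + 32, 10 + 32, 5);	// rewrite RT to r5
 */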
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
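/*
 * Illustrative ONE_REG usage (sketch only): get_reg_val() wraps a host value
 * in a union kvmppc_one_reg sized by the KVM_REG_SIZE bits of the register
 * ID, and set_reg_val() unwraps it again, e.g. in a get/set_one_reg handler:
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));	// read path
 *		break;
 *
 *	case KVM_REG_PPC_DAR:
 *		kvmppc_set_dar(vcpu, set_reg_val(id, *val));	// write path
 *		break;
 */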
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
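/*
 * Illustrative pairing of the two helpers above (sketch only, following the
 * ordering rules described in the comment block): the sender marks the target
 * before firing the doorbell/IPI, and the receiver clears the mark before
 * demultiplexing the message words.
 *
 *	// sender side (doorbell_global_ipi()-style path)
 *	kvmppc_set_host_ipi(cpu);			// smp_mb(), then host_ipi = 1
 *	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, msg);
 *
 *	// receiver side (doorbell/IPI handler)
 *	kvmppc_clear_host_ipi(smp_processor_id());	// host_ipi = 0, then smp_mb()
 *	smp_ipi_demux_relaxed();
 */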
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new POWER9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller of POWER8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the new "XIVE"
 * interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);

extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }

static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }

static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */
#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
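/*
 * For reference (illustrative expansion, no extra code is generated here):
 * on a non-BOOKE-HV build, SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) below
 * expands to endian-aware accessors on the shared page, roughly:
 *
 *	static inline u64 kvmppc_get_sprg0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->sprg0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->sprg0);
 *	}
 *
 * while a BOOKE-HV build reads and writes the guest SPR directly via
 * mfspr/mtspr on SPRN_GSPRG0.
 */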
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
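/*
 * Typical call site (sketch only, not lifted from this header): after
 * kvmppc_prepare_to_enter() has hard-disabled interrupts, the entry path
 * re-arms the lazy-EE bookkeeping just before switching into the guest.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;
 *	kvmppc_fix_ee_before_entry();
 *	// ... enter the guest with interrupts still hard-disabled ...
 */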
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
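/*
 * Example (illustrative only): for an indexed access such as lwzx rt,ra,rb,
 * an emulation path would compute the effective address from the RA/RB
 * operands and then issue the 4-byte load:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
 *	emulated = kvmppc_handle_load(vcpu, rt, 4, 1);
 *
 * In 32-bit guest mode (MSR[SF] / MSR[CM] clear) the helper truncates the
 * sum to 32 bits, matching the architected effective-address wrap.
 */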
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */