powerpc.c 58 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright IBM Corp. 2007
  5. *
  6. * Authors: Hollis Blanchard <[email protected]>
  7. * Christian Ehrhardt <[email protected]>
  8. */
  9. #include <linux/errno.h>
  10. #include <linux/err.h>
  11. #include <linux/kvm_host.h>
  12. #include <linux/vmalloc.h>
  13. #include <linux/hrtimer.h>
  14. #include <linux/sched/signal.h>
  15. #include <linux/fs.h>
  16. #include <linux/slab.h>
  17. #include <linux/file.h>
  18. #include <linux/module.h>
  19. #include <linux/irqbypass.h>
  20. #include <linux/kvm_irqfd.h>
  21. #include <linux/of.h>
  22. #include <asm/cputable.h>
  23. #include <linux/uaccess.h>
  24. #include <asm/kvm_ppc.h>
  25. #include <asm/cputhreads.h>
  26. #include <asm/irqflags.h>
  27. #include <asm/iommu.h>
  28. #include <asm/switch_to.h>
  29. #include <asm/xive.h>
  30. #ifdef CONFIG_PPC_PSERIES
  31. #include <asm/hvcall.h>
  32. #include <asm/plpar_wrappers.h>
  33. #endif
  34. #include <asm/ultravisor.h>
  35. #include <asm/setup.h>
  36. #include "timing.h"
  37. #include "irq.h"
  38. #include "../mm/mmu_decl.h"
  39. #define CREATE_TRACE_POINTS
  40. #include "trace.h"
  41. struct kvmppc_ops *kvmppc_hv_ops;
  42. EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
  43. struct kvmppc_ops *kvmppc_pr_ops;
  44. EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
  45. int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
  46. {
  47. return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
  48. }
  49. bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
  50. {
  51. return kvm_arch_vcpu_runnable(vcpu);
  52. }
  53. bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
  54. {
  55. return false;
  56. }
  57. int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  58. {
  59. return 1;
  60. }
  61. /*
  62. * Common checks before entering the guest world. Call with interrupts
  63. * disabled.
  64. *
  65. * returns:
  66. *
  67. * == 1 if we're ready to go into guest state
  68. * <= 0 if we need to go back to the host with return value
  69. */
  70. int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
  71. {
  72. int r;
  73. WARN_ON(irqs_disabled());
  74. hard_irq_disable();
  75. while (true) {
  76. if (need_resched()) {
  77. local_irq_enable();
  78. cond_resched();
  79. hard_irq_disable();
  80. continue;
  81. }
  82. if (signal_pending(current)) {
  83. kvmppc_account_exit(vcpu, SIGNAL_EXITS);
  84. vcpu->run->exit_reason = KVM_EXIT_INTR;
  85. r = -EINTR;
  86. break;
  87. }
  88. vcpu->mode = IN_GUEST_MODE;
  89. /*
  90. * Reading vcpu->requests must happen after setting vcpu->mode,
  91. * so we don't miss a request because the requester sees
  92. * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
  93. * before next entering the guest (and thus doesn't IPI).
  94. * This also orders the write to mode from any reads
  95. * to the page tables done while the VCPU is running.
  96. * Please see the comment in kvm_flush_remote_tlbs.
  97. */
  98. smp_mb();
  99. if (kvm_request_pending(vcpu)) {
  100. /* Make sure we process requests preemptable */
  101. local_irq_enable();
  102. trace_kvm_check_requests(vcpu);
  103. r = kvmppc_core_check_requests(vcpu);
  104. hard_irq_disable();
  105. if (r > 0)
  106. continue;
  107. break;
  108. }
  109. if (kvmppc_core_prepare_to_enter(vcpu)) {
  110. /* interrupts got enabled in between, so we
  111. are back at square 1 */
  112. continue;
  113. }
  114. guest_enter_irqoff();
  115. return 1;
  116. }
  117. /* return to host */
  118. local_irq_enable();
  119. return r;
  120. }
  121. EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
  122. #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
  123. static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
  124. {
  125. struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
  126. int i;
  127. shared->sprg0 = swab64(shared->sprg0);
  128. shared->sprg1 = swab64(shared->sprg1);
  129. shared->sprg2 = swab64(shared->sprg2);
  130. shared->sprg3 = swab64(shared->sprg3);
  131. shared->srr0 = swab64(shared->srr0);
  132. shared->srr1 = swab64(shared->srr1);
  133. shared->dar = swab64(shared->dar);
  134. shared->msr = swab64(shared->msr);
  135. shared->dsisr = swab32(shared->dsisr);
  136. shared->int_pending = swab32(shared->int_pending);
  137. for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
  138. shared->sr[i] = swab32(shared->sr[i]);
  139. }
  140. #endif
  141. int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
  142. {
  143. int nr = kvmppc_get_gpr(vcpu, 11);
  144. int r;
  145. unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
  146. unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
  147. unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
  148. unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
  149. unsigned long r2 = 0;
  150. if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
  151. /* 32 bit mode */
  152. param1 &= 0xffffffff;
  153. param2 &= 0xffffffff;
  154. param3 &= 0xffffffff;
  155. param4 &= 0xffffffff;
  156. }
  157. switch (nr) {
  158. case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
  159. {
  160. #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
  161. /* Book3S can be little endian, find it out here */
  162. int shared_big_endian = true;
  163. if (vcpu->arch.intr_msr & MSR_LE)
  164. shared_big_endian = false;
  165. if (shared_big_endian != vcpu->arch.shared_big_endian)
  166. kvmppc_swab_shared(vcpu);
  167. vcpu->arch.shared_big_endian = shared_big_endian;
  168. #endif
  169. if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
  170. /*
  171. * Older versions of the Linux magic page code had
  172. * a bug where they would map their trampoline code
  173. * NX. If that's the case, remove !PR NX capability.
  174. */
  175. vcpu->arch.disable_kernel_nx = true;
  176. kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
  177. }
  178. vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
  179. vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
  180. #ifdef CONFIG_PPC_64K_PAGES
  181. /*
  182. * Make sure our 4k magic page is in the same window of a 64k
  183. * page within the guest and within the host's page.
  184. */
  185. if ((vcpu->arch.magic_page_pa & 0xf000) !=
  186. ((ulong)vcpu->arch.shared & 0xf000)) {
  187. void *old_shared = vcpu->arch.shared;
  188. ulong shared = (ulong)vcpu->arch.shared;
  189. void *new_shared;
  190. shared &= PAGE_MASK;
  191. shared |= vcpu->arch.magic_page_pa & 0xf000;
  192. new_shared = (void*)shared;
  193. memcpy(new_shared, old_shared, 0x1000);
  194. vcpu->arch.shared = new_shared;
  195. }
  196. #endif
  197. r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
  198. r = EV_SUCCESS;
  199. break;
  200. }
  201. case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
  202. r = EV_SUCCESS;
  203. #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
  204. r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
  205. #endif
  206. /* Second return value is in r4 */
  207. break;
  208. case EV_HCALL_TOKEN(EV_IDLE):
  209. r = EV_SUCCESS;
  210. kvm_vcpu_halt(vcpu);
  211. break;
  212. default:
  213. r = EV_UNIMPLEMENTED;
  214. break;
  215. }
  216. kvmppc_set_gpr(vcpu, 4, r2);
  217. return r;
  218. }
  219. EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
  220. int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
  221. {
  222. int r = false;
  223. /* We have to know what CPU to virtualize */
  224. if (!vcpu->arch.pvr)
  225. goto out;
  226. /* PAPR only works with book3s_64 */
  227. if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
  228. goto out;
  229. /* HV KVM can only do PAPR mode for now */
  230. if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
  231. goto out;
  232. #ifdef CONFIG_KVM_BOOKE_HV
  233. if (!cpu_has_feature(CPU_FTR_EMB_HV))
  234. goto out;
  235. #endif
  236. r = true;
  237. out:
  238. vcpu->arch.sane = r;
  239. return r ? 0 : -EINVAL;
  240. }
  241. EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
  242. int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
  243. {
  244. enum emulation_result er;
  245. int r;
  246. er = kvmppc_emulate_loadstore(vcpu);
  247. switch (er) {
  248. case EMULATE_DONE:
  249. /* Future optimization: only reload non-volatiles if they were
  250. * actually modified. */
  251. r = RESUME_GUEST_NV;
  252. break;
  253. case EMULATE_AGAIN:
  254. r = RESUME_GUEST;
  255. break;
  256. case EMULATE_DO_MMIO:
  257. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  258. /* We must reload nonvolatiles because "update" load/store
  259. * instructions modify register state. */
  260. /* Future optimization: only reload non-volatiles if they were
  261. * actually modified. */
  262. r = RESUME_HOST_NV;
  263. break;
  264. case EMULATE_FAIL:
  265. {
  266. u32 last_inst;
  267. kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
  268. kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
  269. last_inst);
  270. /*
  271. * Injecting a Data Storage here is a bit more
  272. * accurate since the instruction that caused the
  273. * access could still be a valid one.
  274. */
  275. if (!IS_ENABLED(CONFIG_BOOKE)) {
  276. ulong dsisr = DSISR_BADACCESS;
  277. if (vcpu->mmio_is_write)
  278. dsisr |= DSISR_ISSTORE;
  279. kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
  280. } else {
  281. /*
  282. * BookE does not send a SIGBUS on a bad
  283. * fault, so use a Program interrupt instead
  284. * to avoid a fault loop.
  285. */
  286. kvmppc_core_queue_program(vcpu, 0);
  287. }
  288. r = RESUME_GUEST;
  289. break;
  290. }
  291. default:
  292. WARN_ON(1);
  293. r = RESUME_GUEST;
  294. }
  295. return r;
  296. }
  297. EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
  298. int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
  299. bool data)
  300. {
  301. ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
  302. struct kvmppc_pte pte;
  303. int r = -EINVAL;
  304. vcpu->stat.st++;
  305. if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
  306. r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
  307. size);
  308. if ((!r) || (r == -EAGAIN))
  309. return r;
  310. r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
  311. XLATE_WRITE, &pte);
  312. if (r < 0)
  313. return r;
  314. *eaddr = pte.raddr;
  315. if (!pte.may_write)
  316. return -EPERM;
  317. /* Magic page override */
  318. if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
  319. ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
  320. !(kvmppc_get_msr(vcpu) & MSR_PR)) {
  321. void *magic = vcpu->arch.shared;
  322. magic += pte.eaddr & 0xfff;
  323. memcpy(magic, ptr, size);
  324. return EMULATE_DONE;
  325. }
  326. if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
  327. return EMULATE_DO_MMIO;
  328. return EMULATE_DONE;
  329. }
  330. EXPORT_SYMBOL_GPL(kvmppc_st);
  331. int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
  332. bool data)
  333. {
  334. ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
  335. struct kvmppc_pte pte;
  336. int rc = -EINVAL;
  337. vcpu->stat.ld++;
  338. if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
  339. rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
  340. size);
  341. if ((!rc) || (rc == -EAGAIN))
  342. return rc;
  343. rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
  344. XLATE_READ, &pte);
  345. if (rc)
  346. return rc;
  347. *eaddr = pte.raddr;
  348. if (!pte.may_read)
  349. return -EPERM;
  350. if (!data && !pte.may_execute)
  351. return -ENOEXEC;
  352. /* Magic page override */
  353. if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
  354. ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
  355. !(kvmppc_get_msr(vcpu) & MSR_PR)) {
  356. void *magic = vcpu->arch.shared;
  357. magic += pte.eaddr & 0xfff;
  358. memcpy(ptr, magic, size);
  359. return EMULATE_DONE;
  360. }
  361. kvm_vcpu_srcu_read_lock(vcpu);
  362. rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
  363. kvm_vcpu_srcu_read_unlock(vcpu);
  364. if (rc)
  365. return EMULATE_DO_MMIO;
  366. return EMULATE_DONE;
  367. }
  368. EXPORT_SYMBOL_GPL(kvmppc_ld);
  369. int kvm_arch_hardware_enable(void)
  370. {
  371. return 0;
  372. }
  373. int kvm_arch_hardware_setup(void *opaque)
  374. {
  375. return 0;
  376. }
  377. int kvm_arch_check_processor_compat(void *opaque)
  378. {
  379. return kvmppc_core_check_processor_compat();
  380. }
  381. int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
  382. {
  383. struct kvmppc_ops *kvm_ops = NULL;
  384. int r;
  385. /*
  386. * if we have both HV and PR enabled, default is HV
  387. */
  388. if (type == 0) {
  389. if (kvmppc_hv_ops)
  390. kvm_ops = kvmppc_hv_ops;
  391. else
  392. kvm_ops = kvmppc_pr_ops;
  393. if (!kvm_ops)
  394. goto err_out;
  395. } else if (type == KVM_VM_PPC_HV) {
  396. if (!kvmppc_hv_ops)
  397. goto err_out;
  398. kvm_ops = kvmppc_hv_ops;
  399. } else if (type == KVM_VM_PPC_PR) {
  400. if (!kvmppc_pr_ops)
  401. goto err_out;
  402. kvm_ops = kvmppc_pr_ops;
  403. } else
  404. goto err_out;
  405. if (!try_module_get(kvm_ops->owner))
  406. return -ENOENT;
  407. kvm->arch.kvm_ops = kvm_ops;
  408. r = kvmppc_core_init_vm(kvm);
  409. if (r)
  410. module_put(kvm_ops->owner);
  411. return r;
  412. err_out:
  413. return -EINVAL;
  414. }
  415. void kvm_arch_destroy_vm(struct kvm *kvm)
  416. {
  417. #ifdef CONFIG_KVM_XICS
  418. /*
  419. * We call kick_all_cpus_sync() to ensure that all
  420. * CPUs have executed any pending IPIs before we
  421. * continue and free VCPUs structures below.
  422. */
  423. if (is_kvmppc_hv_enabled(kvm))
  424. kick_all_cpus_sync();
  425. #endif
  426. kvm_destroy_vcpus(kvm);
  427. mutex_lock(&kvm->lock);
  428. kvmppc_core_destroy_vm(kvm);
  429. mutex_unlock(&kvm->lock);
  430. /* drop the module reference */
  431. module_put(kvm->arch.kvm_ops->owner);
  432. }
  433. int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
  434. {
  435. int r;
  436. /* Assume we're using HV mode when the HV module is loaded */
  437. int hv_enabled = kvmppc_hv_ops ? 1 : 0;
  438. if (kvm) {
  439. /*
  440. * Hooray - we know which VM type we're running on. Depend on
  441. * that rather than the guess above.
  442. */
  443. hv_enabled = is_kvmppc_hv_enabled(kvm);
  444. }
  445. switch (ext) {
  446. #ifdef CONFIG_BOOKE
  447. case KVM_CAP_PPC_BOOKE_SREGS:
  448. case KVM_CAP_PPC_BOOKE_WATCHDOG:
  449. case KVM_CAP_PPC_EPR:
  450. #else
  451. case KVM_CAP_PPC_SEGSTATE:
  452. case KVM_CAP_PPC_HIOR:
  453. case KVM_CAP_PPC_PAPR:
  454. #endif
  455. case KVM_CAP_PPC_UNSET_IRQ:
  456. case KVM_CAP_PPC_IRQ_LEVEL:
  457. case KVM_CAP_ENABLE_CAP:
  458. case KVM_CAP_ONE_REG:
  459. case KVM_CAP_IOEVENTFD:
  460. case KVM_CAP_DEVICE_CTRL:
  461. case KVM_CAP_IMMEDIATE_EXIT:
  462. case KVM_CAP_SET_GUEST_DEBUG:
  463. r = 1;
  464. break;
  465. case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
  466. case KVM_CAP_PPC_PAIRED_SINGLES:
  467. case KVM_CAP_PPC_OSI:
  468. case KVM_CAP_PPC_GET_PVINFO:
  469. #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
  470. case KVM_CAP_SW_TLB:
  471. #endif
  472. /* We support this only for PR */
  473. r = !hv_enabled;
  474. break;
  475. #ifdef CONFIG_KVM_MPIC
  476. case KVM_CAP_IRQ_MPIC:
  477. r = 1;
  478. break;
  479. #endif
  480. #ifdef CONFIG_PPC_BOOK3S_64
  481. case KVM_CAP_SPAPR_TCE:
  482. case KVM_CAP_SPAPR_TCE_64:
  483. r = 1;
  484. break;
  485. case KVM_CAP_SPAPR_TCE_VFIO:
  486. r = !!cpu_has_feature(CPU_FTR_HVMODE);
  487. break;
  488. case KVM_CAP_PPC_RTAS:
  489. case KVM_CAP_PPC_FIXUP_HCALL:
  490. case KVM_CAP_PPC_ENABLE_HCALL:
  491. #ifdef CONFIG_KVM_XICS
  492. case KVM_CAP_IRQ_XICS:
  493. #endif
  494. case KVM_CAP_PPC_GET_CPU_CHAR:
  495. r = 1;
  496. break;
  497. #ifdef CONFIG_KVM_XIVE
  498. case KVM_CAP_PPC_IRQ_XIVE:
  499. /*
  500. * We need XIVE to be enabled on the platform (implies
  501. * a POWER9 processor) and the PowerNV platform, as
  502. * nested is not yet supported.
  503. */
  504. r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
  505. kvmppc_xive_native_supported();
  506. break;
  507. #endif
  508. case KVM_CAP_PPC_ALLOC_HTAB:
  509. r = hv_enabled;
  510. break;
  511. #endif /* CONFIG_PPC_BOOK3S_64 */
  512. #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  513. case KVM_CAP_PPC_SMT:
  514. r = 0;
  515. if (kvm) {
  516. if (kvm->arch.emul_smt_mode > 1)
  517. r = kvm->arch.emul_smt_mode;
  518. else
  519. r = kvm->arch.smt_mode;
  520. } else if (hv_enabled) {
  521. if (cpu_has_feature(CPU_FTR_ARCH_300))
  522. r = 1;
  523. else
  524. r = threads_per_subcore;
  525. }
  526. break;
  527. case KVM_CAP_PPC_SMT_POSSIBLE:
  528. r = 1;
  529. if (hv_enabled) {
  530. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  531. r = ((threads_per_subcore << 1) - 1);
  532. else
  533. /* P9 can emulate dbells, so allow any mode */
  534. r = 8 | 4 | 2 | 1;
  535. }
  536. break;
  537. case KVM_CAP_PPC_RMA:
  538. r = 0;
  539. break;
  540. case KVM_CAP_PPC_HWRNG:
  541. r = kvmppc_hwrng_present();
  542. break;
  543. case KVM_CAP_PPC_MMU_RADIX:
  544. r = !!(hv_enabled && radix_enabled());
  545. break;
  546. case KVM_CAP_PPC_MMU_HASH_V3:
  547. r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
  548. kvmppc_hv_ops->hash_v3_possible());
  549. break;
  550. case KVM_CAP_PPC_NESTED_HV:
  551. r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
  552. !kvmppc_hv_ops->enable_nested(NULL));
  553. break;
  554. #endif
  555. case KVM_CAP_SYNC_MMU:
  556. #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  557. r = hv_enabled;
  558. #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  559. r = 1;
  560. #else
  561. r = 0;
  562. #endif
  563. break;
  564. #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  565. case KVM_CAP_PPC_HTAB_FD:
  566. r = hv_enabled;
  567. break;
  568. #endif
  569. case KVM_CAP_NR_VCPUS:
  570. /*
  571. * Recommending a number of CPUs is somewhat arbitrary; we
  572. * return the number of present CPUs for -HV (since a host
  573. * will have secondary threads "offline"), and for other KVM
  574. * implementations just count online CPUs.
  575. */
  576. if (hv_enabled)
  577. r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
  578. else
  579. r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
  580. break;
  581. case KVM_CAP_MAX_VCPUS:
  582. r = KVM_MAX_VCPUS;
  583. break;
  584. case KVM_CAP_MAX_VCPU_ID:
  585. r = KVM_MAX_VCPU_IDS;
  586. break;
  587. #ifdef CONFIG_PPC_BOOK3S_64
  588. case KVM_CAP_PPC_GET_SMMU_INFO:
  589. r = 1;
  590. break;
  591. case KVM_CAP_SPAPR_MULTITCE:
  592. r = 1;
  593. break;
  594. case KVM_CAP_SPAPR_RESIZE_HPT:
  595. r = !!hv_enabled;
  596. break;
  597. #endif
  598. #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  599. case KVM_CAP_PPC_FWNMI:
  600. r = hv_enabled;
  601. break;
  602. #endif
  603. #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  604. case KVM_CAP_PPC_HTM:
  605. r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
  606. (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
  607. break;
  608. #endif
  609. #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
  610. case KVM_CAP_PPC_SECURE_GUEST:
  611. r = hv_enabled && kvmppc_hv_ops->enable_svm &&
  612. !kvmppc_hv_ops->enable_svm(NULL);
  613. break;
  614. case KVM_CAP_PPC_DAWR1:
  615. r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
  616. !kvmppc_hv_ops->enable_dawr1(NULL));
  617. break;
  618. case KVM_CAP_PPC_RPT_INVALIDATE:
  619. r = 1;
  620. break;
  621. #endif
  622. case KVM_CAP_PPC_AIL_MODE_3:
  623. r = 0;
  624. /*
  625. * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
  626. * The POWER9s can support it if the guest runs in hash mode,
  627. * but QEMU doesn't necessarily query the capability in time.
  628. */
  629. if (hv_enabled) {
  630. if (kvmhv_on_pseries()) {
  631. if (pseries_reloc_on_exception())
  632. r = 1;
  633. } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
  634. !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
  635. r = 1;
  636. }
  637. }
  638. break;
  639. default:
  640. r = 0;
  641. break;
  642. }
  643. return r;
  644. }
  645. long kvm_arch_dev_ioctl(struct file *filp,
  646. unsigned int ioctl, unsigned long arg)
  647. {
  648. return -EINVAL;
  649. }
  650. void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
  651. {
  652. kvmppc_core_free_memslot(kvm, slot);
  653. }
  654. int kvm_arch_prepare_memory_region(struct kvm *kvm,
  655. const struct kvm_memory_slot *old,
  656. struct kvm_memory_slot *new,
  657. enum kvm_mr_change change)
  658. {
  659. return kvmppc_core_prepare_memory_region(kvm, old, new, change);
  660. }
  661. void kvm_arch_commit_memory_region(struct kvm *kvm,
  662. struct kvm_memory_slot *old,
  663. const struct kvm_memory_slot *new,
  664. enum kvm_mr_change change)
  665. {
  666. kvmppc_core_commit_memory_region(kvm, old, new, change);
  667. }
  668. void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  669. struct kvm_memory_slot *slot)
  670. {
  671. kvmppc_core_flush_memslot(kvm, slot);
  672. }
  673. int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
  674. {
  675. return 0;
  676. }
  677. static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
  678. {
  679. struct kvm_vcpu *vcpu;
  680. vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
  681. kvmppc_decrementer_func(vcpu);
  682. return HRTIMER_NORESTART;
  683. }
  684. int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
  685. {
  686. int err;
  687. hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
  688. vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
  689. #ifdef CONFIG_KVM_EXIT_TIMING
  690. mutex_init(&vcpu->arch.exit_timing_lock);
  691. #endif
  692. err = kvmppc_subarch_vcpu_init(vcpu);
  693. if (err)
  694. return err;
  695. err = kvmppc_core_vcpu_create(vcpu);
  696. if (err)
  697. goto out_vcpu_uninit;
  698. rcuwait_init(&vcpu->arch.wait);
  699. vcpu->arch.waitp = &vcpu->arch.wait;
  700. return 0;
  701. out_vcpu_uninit:
  702. kvmppc_subarch_vcpu_uninit(vcpu);
  703. return err;
  704. }
  705. void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
  706. {
  707. }
  708. void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
  709. {
  710. /* Make sure we're not using the vcpu anymore */
  711. hrtimer_cancel(&vcpu->arch.dec_timer);
  712. switch (vcpu->arch.irq_type) {
  713. case KVMPPC_IRQ_MPIC:
  714. kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
  715. break;
  716. case KVMPPC_IRQ_XICS:
  717. if (xics_on_xive())
  718. kvmppc_xive_cleanup_vcpu(vcpu);
  719. else
  720. kvmppc_xics_free_icp(vcpu);
  721. break;
  722. case KVMPPC_IRQ_XIVE:
  723. kvmppc_xive_native_cleanup_vcpu(vcpu);
  724. break;
  725. }
  726. kvmppc_core_vcpu_free(vcpu);
  727. kvmppc_subarch_vcpu_uninit(vcpu);
  728. }
  729. int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
  730. {
  731. return kvmppc_core_pending_dec(vcpu);
  732. }
  733. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  734. {
  735. #ifdef CONFIG_BOOKE
  736. /*
  737. * vrsave (formerly usprg0) isn't used by Linux, but may
  738. * be used by the guest.
  739. *
  740. * On non-booke this is associated with Altivec and
  741. * is handled by code in book3s.c.
  742. */
  743. mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
  744. #endif
  745. kvmppc_core_vcpu_load(vcpu, cpu);
  746. }
  747. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  748. {
  749. kvmppc_core_vcpu_put(vcpu);
  750. #ifdef CONFIG_BOOKE
  751. vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
  752. #endif
  753. }
  754. /*
  755. * irq_bypass_add_producer and irq_bypass_del_producer are only
  756. * useful if the architecture supports PCI passthrough.
  757. * irq_bypass_stop and irq_bypass_start are not needed and so
  758. * kvm_ops are not defined for them.
  759. */
  760. bool kvm_arch_has_irq_bypass(void)
  761. {
  762. return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
  763. (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
  764. }
  765. int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
  766. struct irq_bypass_producer *prod)
  767. {
  768. struct kvm_kernel_irqfd *irqfd =
  769. container_of(cons, struct kvm_kernel_irqfd, consumer);
  770. struct kvm *kvm = irqfd->kvm;
  771. if (kvm->arch.kvm_ops->irq_bypass_add_producer)
  772. return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
  773. return 0;
  774. }
  775. void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
  776. struct irq_bypass_producer *prod)
  777. {
  778. struct kvm_kernel_irqfd *irqfd =
  779. container_of(cons, struct kvm_kernel_irqfd, consumer);
  780. struct kvm *kvm = irqfd->kvm;
  781. if (kvm->arch.kvm_ops->irq_bypass_del_producer)
  782. kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
  783. }
  784. #ifdef CONFIG_VSX
  785. static inline int kvmppc_get_vsr_dword_offset(int index)
  786. {
  787. int offset;
  788. if ((index != 0) && (index != 1))
  789. return -1;
  790. #ifdef __BIG_ENDIAN
  791. offset = index;
  792. #else
  793. offset = 1 - index;
  794. #endif
  795. return offset;
  796. }
  797. static inline int kvmppc_get_vsr_word_offset(int index)
  798. {
  799. int offset;
  800. if ((index > 3) || (index < 0))
  801. return -1;
  802. #ifdef __BIG_ENDIAN
  803. offset = index;
  804. #else
  805. offset = 3 - index;
  806. #endif
  807. return offset;
  808. }
  809. static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
  810. u64 gpr)
  811. {
  812. union kvmppc_one_reg val;
  813. int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
  814. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  815. if (offset == -1)
  816. return;
  817. if (index >= 32) {
  818. val.vval = VCPU_VSX_VR(vcpu, index - 32);
  819. val.vsxval[offset] = gpr;
  820. VCPU_VSX_VR(vcpu, index - 32) = val.vval;
  821. } else {
  822. VCPU_VSX_FPR(vcpu, index, offset) = gpr;
  823. }
  824. }
  825. static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
  826. u64 gpr)
  827. {
  828. union kvmppc_one_reg val;
  829. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  830. if (index >= 32) {
  831. val.vval = VCPU_VSX_VR(vcpu, index - 32);
  832. val.vsxval[0] = gpr;
  833. val.vsxval[1] = gpr;
  834. VCPU_VSX_VR(vcpu, index - 32) = val.vval;
  835. } else {
  836. VCPU_VSX_FPR(vcpu, index, 0) = gpr;
  837. VCPU_VSX_FPR(vcpu, index, 1) = gpr;
  838. }
  839. }
  840. static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
  841. u32 gpr)
  842. {
  843. union kvmppc_one_reg val;
  844. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  845. if (index >= 32) {
  846. val.vsx32val[0] = gpr;
  847. val.vsx32val[1] = gpr;
  848. val.vsx32val[2] = gpr;
  849. val.vsx32val[3] = gpr;
  850. VCPU_VSX_VR(vcpu, index - 32) = val.vval;
  851. } else {
  852. val.vsx32val[0] = gpr;
  853. val.vsx32val[1] = gpr;
  854. VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
  855. VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
  856. }
  857. }
  858. static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
  859. u32 gpr32)
  860. {
  861. union kvmppc_one_reg val;
  862. int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
  863. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  864. int dword_offset, word_offset;
  865. if (offset == -1)
  866. return;
  867. if (index >= 32) {
  868. val.vval = VCPU_VSX_VR(vcpu, index - 32);
  869. val.vsx32val[offset] = gpr32;
  870. VCPU_VSX_VR(vcpu, index - 32) = val.vval;
  871. } else {
  872. dword_offset = offset / 2;
  873. word_offset = offset % 2;
  874. val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
  875. val.vsx32val[word_offset] = gpr32;
  876. VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
  877. }
  878. }
  879. #endif /* CONFIG_VSX */
  880. #ifdef CONFIG_ALTIVEC
  881. static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
  882. int index, int element_size)
  883. {
  884. int offset;
  885. int elts = sizeof(vector128)/element_size;
  886. if ((index < 0) || (index >= elts))
  887. return -1;
  888. if (kvmppc_need_byteswap(vcpu))
  889. offset = elts - index - 1;
  890. else
  891. offset = index;
  892. return offset;
  893. }
  894. static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
  895. int index)
  896. {
  897. return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
  898. }
  899. static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
  900. int index)
  901. {
  902. return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
  903. }
  904. static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
  905. int index)
  906. {
  907. return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
  908. }
  909. static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
  910. int index)
  911. {
  912. return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
  913. }
  914. static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
  915. u64 gpr)
  916. {
  917. union kvmppc_one_reg val;
  918. int offset = kvmppc_get_vmx_dword_offset(vcpu,
  919. vcpu->arch.mmio_vmx_offset);
  920. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  921. if (offset == -1)
  922. return;
  923. val.vval = VCPU_VSX_VR(vcpu, index);
  924. val.vsxval[offset] = gpr;
  925. VCPU_VSX_VR(vcpu, index) = val.vval;
  926. }
  927. static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
  928. u32 gpr32)
  929. {
  930. union kvmppc_one_reg val;
  931. int offset = kvmppc_get_vmx_word_offset(vcpu,
  932. vcpu->arch.mmio_vmx_offset);
  933. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  934. if (offset == -1)
  935. return;
  936. val.vval = VCPU_VSX_VR(vcpu, index);
  937. val.vsx32val[offset] = gpr32;
  938. VCPU_VSX_VR(vcpu, index) = val.vval;
  939. }
  940. static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
  941. u16 gpr16)
  942. {
  943. union kvmppc_one_reg val;
  944. int offset = kvmppc_get_vmx_hword_offset(vcpu,
  945. vcpu->arch.mmio_vmx_offset);
  946. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  947. if (offset == -1)
  948. return;
  949. val.vval = VCPU_VSX_VR(vcpu, index);
  950. val.vsx16val[offset] = gpr16;
  951. VCPU_VSX_VR(vcpu, index) = val.vval;
  952. }
  953. static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
  954. u8 gpr8)
  955. {
  956. union kvmppc_one_reg val;
  957. int offset = kvmppc_get_vmx_byte_offset(vcpu,
  958. vcpu->arch.mmio_vmx_offset);
  959. int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
  960. if (offset == -1)
  961. return;
  962. val.vval = VCPU_VSX_VR(vcpu, index);
  963. val.vsx8val[offset] = gpr8;
  964. VCPU_VSX_VR(vcpu, index) = val.vval;
  965. }
  966. #endif /* CONFIG_ALTIVEC */
  967. #ifdef CONFIG_PPC_FPU
  968. static inline u64 sp_to_dp(u32 fprs)
  969. {
  970. u64 fprd;
  971. preempt_disable();
  972. enable_kernel_fp();
  973. asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
  974. : "fr0");
  975. preempt_enable();
  976. return fprd;
  977. }
  978. static inline u32 dp_to_sp(u64 fprd)
  979. {
  980. u32 fprs;
  981. preempt_disable();
  982. enable_kernel_fp();
  983. asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
  984. : "fr0");
  985. preempt_enable();
  986. return fprs;
  987. }
  988. #else
  989. #define sp_to_dp(x) (x)
  990. #define dp_to_sp(x) (x)
  991. #endif /* CONFIG_PPC_FPU */
  992. static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
  993. {
  994. struct kvm_run *run = vcpu->run;
  995. u64 gpr;
  996. if (run->mmio.len > sizeof(gpr))
  997. return;
  998. if (!vcpu->arch.mmio_host_swabbed) {
  999. switch (run->mmio.len) {
  1000. case 8: gpr = *(u64 *)run->mmio.data; break;
  1001. case 4: gpr = *(u32 *)run->mmio.data; break;
  1002. case 2: gpr = *(u16 *)run->mmio.data; break;
  1003. case 1: gpr = *(u8 *)run->mmio.data; break;
  1004. }
  1005. } else {
  1006. switch (run->mmio.len) {
  1007. case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
  1008. case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
  1009. case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
  1010. case 1: gpr = *(u8 *)run->mmio.data; break;
  1011. }
  1012. }
  1013. /* conversion between single and double precision */
  1014. if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
  1015. gpr = sp_to_dp(gpr);
  1016. if (vcpu->arch.mmio_sign_extend) {
  1017. switch (run->mmio.len) {
  1018. #ifdef CONFIG_PPC64
  1019. case 4:
  1020. gpr = (s64)(s32)gpr;
  1021. break;
  1022. #endif
  1023. case 2:
  1024. gpr = (s64)(s16)gpr;
  1025. break;
  1026. case 1:
  1027. gpr = (s64)(s8)gpr;
  1028. break;
  1029. }
  1030. }
  1031. switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
  1032. case KVM_MMIO_REG_GPR:
  1033. kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
  1034. break;
  1035. case KVM_MMIO_REG_FPR:
  1036. if (vcpu->kvm->arch.kvm_ops->giveup_ext)
  1037. vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
  1038. VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
  1039. break;
  1040. #ifdef CONFIG_PPC_BOOK3S
  1041. case KVM_MMIO_REG_QPR:
  1042. vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
  1043. break;
  1044. case KVM_MMIO_REG_FQPR:
  1045. VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
  1046. vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
  1047. break;
  1048. #endif
  1049. #ifdef CONFIG_VSX
  1050. case KVM_MMIO_REG_VSX:
  1051. if (vcpu->kvm->arch.kvm_ops->giveup_ext)
  1052. vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
  1053. if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
  1054. kvmppc_set_vsr_dword(vcpu, gpr);
  1055. else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
  1056. kvmppc_set_vsr_word(vcpu, gpr);
  1057. else if (vcpu->arch.mmio_copy_type ==
  1058. KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
  1059. kvmppc_set_vsr_dword_dump(vcpu, gpr);
  1060. else if (vcpu->arch.mmio_copy_type ==
  1061. KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
  1062. kvmppc_set_vsr_word_dump(vcpu, gpr);
  1063. break;
  1064. #endif
  1065. #ifdef CONFIG_ALTIVEC
  1066. case KVM_MMIO_REG_VMX:
  1067. if (vcpu->kvm->arch.kvm_ops->giveup_ext)
  1068. vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
  1069. if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
  1070. kvmppc_set_vmx_dword(vcpu, gpr);
  1071. else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
  1072. kvmppc_set_vmx_word(vcpu, gpr);
  1073. else if (vcpu->arch.mmio_copy_type ==
  1074. KVMPPC_VMX_COPY_HWORD)
  1075. kvmppc_set_vmx_hword(vcpu, gpr);
  1076. else if (vcpu->arch.mmio_copy_type ==
  1077. KVMPPC_VMX_COPY_BYTE)
  1078. kvmppc_set_vmx_byte(vcpu, gpr);
  1079. break;
  1080. #endif
  1081. #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  1082. case KVM_MMIO_REG_NESTED_GPR:
  1083. if (kvmppc_need_byteswap(vcpu))
  1084. gpr = swab64(gpr);
  1085. kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
  1086. sizeof(gpr));
  1087. break;
  1088. #endif
  1089. default:
  1090. BUG();
  1091. }
  1092. }
  1093. static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
  1094. unsigned int rt, unsigned int bytes,
  1095. int is_default_endian, int sign_extend)
  1096. {
  1097. struct kvm_run *run = vcpu->run;
  1098. int idx, ret;
  1099. bool host_swabbed;
  1100. /* Pity C doesn't have a logical XOR operator */
  1101. if (kvmppc_need_byteswap(vcpu)) {
  1102. host_swabbed = is_default_endian;
  1103. } else {
  1104. host_swabbed = !is_default_endian;
  1105. }
  1106. if (bytes > sizeof(run->mmio.data))
  1107. return EMULATE_FAIL;
  1108. run->mmio.phys_addr = vcpu->arch.paddr_accessed;
  1109. run->mmio.len = bytes;
  1110. run->mmio.is_write = 0;
  1111. vcpu->arch.io_gpr = rt;
  1112. vcpu->arch.mmio_host_swabbed = host_swabbed;
  1113. vcpu->mmio_needed = 1;
  1114. vcpu->mmio_is_write = 0;
  1115. vcpu->arch.mmio_sign_extend = sign_extend;
  1116. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1117. ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
  1118. bytes, &run->mmio.data);
  1119. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1120. if (!ret) {
  1121. kvmppc_complete_mmio_load(vcpu);
  1122. vcpu->mmio_needed = 0;
  1123. return EMULATE_DONE;
  1124. }
  1125. return EMULATE_DO_MMIO;
  1126. }
  1127. int kvmppc_handle_load(struct kvm_vcpu *vcpu,
  1128. unsigned int rt, unsigned int bytes,
  1129. int is_default_endian)
  1130. {
  1131. return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
  1132. }
  1133. EXPORT_SYMBOL_GPL(kvmppc_handle_load);
  1134. /* Same as above, but sign extends */
  1135. int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
  1136. unsigned int rt, unsigned int bytes,
  1137. int is_default_endian)
  1138. {
  1139. return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
  1140. }
  1141. #ifdef CONFIG_VSX
  1142. int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
  1143. unsigned int rt, unsigned int bytes,
  1144. int is_default_endian, int mmio_sign_extend)
  1145. {
  1146. enum emulation_result emulated = EMULATE_DONE;
  1147. /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
  1148. if (vcpu->arch.mmio_vsx_copy_nums > 4)
  1149. return EMULATE_FAIL;
  1150. while (vcpu->arch.mmio_vsx_copy_nums) {
  1151. emulated = __kvmppc_handle_load(vcpu, rt, bytes,
  1152. is_default_endian, mmio_sign_extend);
  1153. if (emulated != EMULATE_DONE)
  1154. break;
  1155. vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
  1156. vcpu->arch.mmio_vsx_copy_nums--;
  1157. vcpu->arch.mmio_vsx_offset++;
  1158. }
  1159. return emulated;
  1160. }
  1161. #endif /* CONFIG_VSX */
  1162. int kvmppc_handle_store(struct kvm_vcpu *vcpu,
  1163. u64 val, unsigned int bytes, int is_default_endian)
  1164. {
  1165. struct kvm_run *run = vcpu->run;
  1166. void *data = run->mmio.data;
  1167. int idx, ret;
  1168. bool host_swabbed;
  1169. /* Pity C doesn't have a logical XOR operator */
  1170. if (kvmppc_need_byteswap(vcpu)) {
  1171. host_swabbed = is_default_endian;
  1172. } else {
  1173. host_swabbed = !is_default_endian;
  1174. }
  1175. if (bytes > sizeof(run->mmio.data))
  1176. return EMULATE_FAIL;
  1177. run->mmio.phys_addr = vcpu->arch.paddr_accessed;
  1178. run->mmio.len = bytes;
  1179. run->mmio.is_write = 1;
  1180. vcpu->mmio_needed = 1;
  1181. vcpu->mmio_is_write = 1;
  1182. if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
  1183. val = dp_to_sp(val);
  1184. /* Store the value at the lowest bytes in 'data'. */
  1185. if (!host_swabbed) {
  1186. switch (bytes) {
  1187. case 8: *(u64 *)data = val; break;
  1188. case 4: *(u32 *)data = val; break;
  1189. case 2: *(u16 *)data = val; break;
  1190. case 1: *(u8 *)data = val; break;
  1191. }
  1192. } else {
  1193. switch (bytes) {
  1194. case 8: *(u64 *)data = swab64(val); break;
  1195. case 4: *(u32 *)data = swab32(val); break;
  1196. case 2: *(u16 *)data = swab16(val); break;
  1197. case 1: *(u8 *)data = val; break;
  1198. }
  1199. }
  1200. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1201. ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
  1202. bytes, &run->mmio.data);
  1203. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1204. if (!ret) {
  1205. vcpu->mmio_needed = 0;
  1206. return EMULATE_DONE;
  1207. }
  1208. return EMULATE_DO_MMIO;
  1209. }
  1210. EXPORT_SYMBOL_GPL(kvmppc_handle_store);
  1211. #ifdef CONFIG_VSX
  1212. static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
  1213. {
  1214. u32 dword_offset, word_offset;
  1215. union kvmppc_one_reg reg;
  1216. int vsx_offset = 0;
  1217. int copy_type = vcpu->arch.mmio_copy_type;
  1218. int result = 0;
  1219. switch (copy_type) {
  1220. case KVMPPC_VSX_COPY_DWORD:
  1221. vsx_offset =
  1222. kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
  1223. if (vsx_offset == -1) {
  1224. result = -1;
  1225. break;
  1226. }
  1227. if (rs < 32) {
  1228. *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
  1229. } else {
  1230. reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
  1231. *val = reg.vsxval[vsx_offset];
  1232. }
  1233. break;
  1234. case KVMPPC_VSX_COPY_WORD:
  1235. vsx_offset =
  1236. kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
  1237. if (vsx_offset == -1) {
  1238. result = -1;
  1239. break;
  1240. }
  1241. if (rs < 32) {
  1242. dword_offset = vsx_offset / 2;
  1243. word_offset = vsx_offset % 2;
  1244. reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
  1245. *val = reg.vsx32val[word_offset];
  1246. } else {
  1247. reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
  1248. *val = reg.vsx32val[vsx_offset];
  1249. }
  1250. break;
  1251. default:
  1252. result = -1;
  1253. break;
  1254. }
  1255. return result;
  1256. }
  1257. int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
  1258. int rs, unsigned int bytes, int is_default_endian)
  1259. {
  1260. u64 val;
  1261. enum emulation_result emulated = EMULATE_DONE;
  1262. vcpu->arch.io_gpr = rs;
  1263. /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
  1264. if (vcpu->arch.mmio_vsx_copy_nums > 4)
  1265. return EMULATE_FAIL;
  1266. while (vcpu->arch.mmio_vsx_copy_nums) {
  1267. if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
  1268. return EMULATE_FAIL;
  1269. emulated = kvmppc_handle_store(vcpu,
  1270. val, bytes, is_default_endian);
  1271. if (emulated != EMULATE_DONE)
  1272. break;
  1273. vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
  1274. vcpu->arch.mmio_vsx_copy_nums--;
  1275. vcpu->arch.mmio_vsx_offset++;
  1276. }
  1277. return emulated;
  1278. }
  1279. static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
  1280. {
  1281. struct kvm_run *run = vcpu->run;
  1282. enum emulation_result emulated = EMULATE_FAIL;
  1283. int r;
  1284. vcpu->arch.paddr_accessed += run->mmio.len;
  1285. if (!vcpu->mmio_is_write) {
  1286. emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
  1287. run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
  1288. } else {
  1289. emulated = kvmppc_handle_vsx_store(vcpu,
  1290. vcpu->arch.io_gpr, run->mmio.len, 1);
  1291. }
  1292. switch (emulated) {
  1293. case EMULATE_DO_MMIO:
  1294. run->exit_reason = KVM_EXIT_MMIO;
  1295. r = RESUME_HOST;
  1296. break;
  1297. case EMULATE_FAIL:
  1298. pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
  1299. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  1300. run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  1301. r = RESUME_HOST;
  1302. break;
  1303. default:
  1304. r = RESUME_GUEST;
  1305. break;
  1306. }
  1307. return r;
  1308. }
  1309. #endif /* CONFIG_VSX */
  1310. #ifdef CONFIG_ALTIVEC
  1311. int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
  1312. unsigned int rt, unsigned int bytes, int is_default_endian)
  1313. {
  1314. enum emulation_result emulated = EMULATE_DONE;
  1315. if (vcpu->arch.mmio_vmx_copy_nums > 2)
  1316. return EMULATE_FAIL;
  1317. while (vcpu->arch.mmio_vmx_copy_nums) {
  1318. emulated = __kvmppc_handle_load(vcpu, rt, bytes,
  1319. is_default_endian, 0);
  1320. if (emulated != EMULATE_DONE)
  1321. break;
  1322. vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
  1323. vcpu->arch.mmio_vmx_copy_nums--;
  1324. vcpu->arch.mmio_vmx_offset++;
  1325. }
  1326. return emulated;
  1327. }
  1328. static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
  1329. {
  1330. union kvmppc_one_reg reg;
  1331. int vmx_offset = 0;
  1332. int result = 0;
  1333. vmx_offset =
  1334. kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
  1335. if (vmx_offset == -1)
  1336. return -1;
  1337. reg.vval = VCPU_VSX_VR(vcpu, index);
  1338. *val = reg.vsxval[vmx_offset];
  1339. return result;
  1340. }
  1341. static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
  1342. {
  1343. union kvmppc_one_reg reg;
  1344. int vmx_offset = 0;
  1345. int result = 0;
  1346. vmx_offset =
  1347. kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
  1348. if (vmx_offset == -1)
  1349. return -1;
  1350. reg.vval = VCPU_VSX_VR(vcpu, index);
  1351. *val = reg.vsx32val[vmx_offset];
  1352. return result;
  1353. }
  1354. static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
  1355. {
  1356. union kvmppc_one_reg reg;
  1357. int vmx_offset = 0;
  1358. int result = 0;
  1359. vmx_offset =
  1360. kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
  1361. if (vmx_offset == -1)
  1362. return -1;
  1363. reg.vval = VCPU_VSX_VR(vcpu, index);
  1364. *val = reg.vsx16val[vmx_offset];
  1365. return result;
  1366. }
  1367. static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
  1368. {
  1369. union kvmppc_one_reg reg;
  1370. int vmx_offset = 0;
  1371. int result = 0;
  1372. vmx_offset =
  1373. kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
  1374. if (vmx_offset == -1)
  1375. return -1;
  1376. reg.vval = VCPU_VSX_VR(vcpu, index);
  1377. *val = reg.vsx8val[vmx_offset];
  1378. return result;
  1379. }
  1380. int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
  1381. unsigned int rs, unsigned int bytes, int is_default_endian)
  1382. {
  1383. u64 val = 0;
  1384. unsigned int index = rs & KVM_MMIO_REG_MASK;
  1385. enum emulation_result emulated = EMULATE_DONE;
  1386. if (vcpu->arch.mmio_vmx_copy_nums > 2)
  1387. return EMULATE_FAIL;
  1388. vcpu->arch.io_gpr = rs;
  1389. while (vcpu->arch.mmio_vmx_copy_nums) {
  1390. switch (vcpu->arch.mmio_copy_type) {
  1391. case KVMPPC_VMX_COPY_DWORD:
  1392. if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
  1393. return EMULATE_FAIL;
  1394. break;
  1395. case KVMPPC_VMX_COPY_WORD:
  1396. if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
  1397. return EMULATE_FAIL;
  1398. break;
  1399. case KVMPPC_VMX_COPY_HWORD:
  1400. if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
  1401. return EMULATE_FAIL;
  1402. break;
  1403. case KVMPPC_VMX_COPY_BYTE:
  1404. if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
  1405. return EMULATE_FAIL;
  1406. break;
  1407. default:
  1408. return EMULATE_FAIL;
  1409. }
  1410. emulated = kvmppc_handle_store(vcpu, val, bytes,
  1411. is_default_endian);
  1412. if (emulated != EMULATE_DONE)
  1413. break;
  1414. vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
  1415. vcpu->arch.mmio_vmx_copy_nums--;
  1416. vcpu->arch.mmio_vmx_offset++;
  1417. }
  1418. return emulated;
  1419. }
  1420. static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
  1421. {
  1422. struct kvm_run *run = vcpu->run;
  1423. enum emulation_result emulated = EMULATE_FAIL;
  1424. int r;
  1425. vcpu->arch.paddr_accessed += run->mmio.len;
  1426. if (!vcpu->mmio_is_write) {
  1427. emulated = kvmppc_handle_vmx_load(vcpu,
  1428. vcpu->arch.io_gpr, run->mmio.len, 1);
  1429. } else {
  1430. emulated = kvmppc_handle_vmx_store(vcpu,
  1431. vcpu->arch.io_gpr, run->mmio.len, 1);
  1432. }
  1433. switch (emulated) {
  1434. case EMULATE_DO_MMIO:
  1435. run->exit_reason = KVM_EXIT_MMIO;
  1436. r = RESUME_HOST;
  1437. break;
  1438. case EMULATE_FAIL:
  1439. pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
  1440. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  1441. run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  1442. r = RESUME_HOST;
  1443. break;
  1444. default:
  1445. r = RESUME_GUEST;
  1446. break;
  1447. }
  1448. return r;
  1449. }
  1450. #endif /* CONFIG_ALTIVEC */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);

	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);

	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
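
/*
 * KVM_RUN: complete whatever the last exit left pending (MMIO data,
 * OSI or PAPR hypercall return values, EPR) before re-entering the
 * guest via kvmppc_vcpu_run().
 */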
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	/*
	 * We're already returning to userspace, don't pass the
	 * RESUME_HOST flags along.
	 */
	if (r > 0)
		r = 0;

	vcpu_put(vcpu);
	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);
	kvm_vcpu_kick(vcpu);

	return 0;
}
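
/* Enable a per-vcpu capability requested via KVM_ENABLE_CAP on a vcpu fd. */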
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			/* drop the file reference taken above before bailing out */
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;

		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
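
/*
 * Async vcpu ioctls are dispatched without taking the vcpu mutex, so a
 * guest interrupt (KVM_INTERRUPT) can be injected while the vcpu thread
 * is inside KVM_RUN.
 */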
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;

		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;

	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
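
/* Enable a VM-wide capability requested via KVM_ENABLE_CAP on the VM fd. */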
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
			break;
		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}
static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;

		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;

		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
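
/*
 * LPID (logical partition ID) allocator shared by the backends.  LPID 0
 * is reserved for the host, so guest allocation starts at 1 and is
 * bounded by nr_lpids as set up by kvmppc_init_lpid().
 */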
static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}