book3s_xive_native.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2019, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

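/*
 * MMIO load from an interrupt source ESB page. The offset selects the
 * ESB operation (e.g. XIVE_ESB_SET_PQ_01) and the load returns the PQ
 * state of the source.
 */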
static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        /*
         * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
         * load operation, so there is no need to enforce load-after-store
         * ordering.
         */

        val = in_be64(xd->eoi_mmio + offset);
        return (u8)val;
}

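/* Disable an event queue and release the guest page backing it. */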
static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q = &xc->queues[prio];

        xive_native_disable_queue(xc->vp_id, q, prio);
        if (q->qpage) {
                put_page(virt_to_page(q->qpage));
                q->qpage = NULL;
        }
}

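/*
 * Reconfigure an event queue and, on success, drop the reference held
 * on the page that previously backed it.
 */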
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
                                              u8 prio, __be32 *qpage,
                                              u32 order, bool can_escalate)
{
        int rc;
        __be32 *qpage_prev = q->qpage;

        rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
                                         can_escalate);
        if (rc)
                return rc;

        if (qpage_prev)
                put_page(virt_to_page(qpage_prev));

        return rc;
}

void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        int i;

        if (!kvmppc_xive_enabled(vcpu))
                return;

        if (!xc)
                return;

        pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

        /* Ensure no interrupt is still routed to that VP */
        xc->valid = false;
        kvmppc_xive_disable_vcpu_interrupts(vcpu);

        /* Free escalations */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        if (kvmppc_xive_has_single_escalation(xc->xive))
                                xive_cleanup_single_escalation(vcpu, xc,
                                                               xc->esc_virq[i]);
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                        xc->esc_virq[i] = 0;
                }
        }

        /* Disable the VP */
        xive_native_disable_vp(xc->vp_id);

        /* Clear the cam word so guest entry won't try to push context */
        vcpu->arch.xive_cam_word = 0;

        /* Free the queues */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                kvmppc_xive_native_cleanup_queue(vcpu, i);
        }

        /* Free the VP */
        kfree(xc);

        /* Cleanup the vcpu */
        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
        vcpu->arch.xive_vcpu = NULL;
}

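/*
 * Connect a vCPU to the device: allocate its kvmppc_xive_vcpu state,
 * compute the VP id and enable the VP in OPAL. Typically reached when
 * userspace enables the KVM_CAP_PPC_IRQ_XIVE capability on the vCPU.
 */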
int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 server_num)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc = NULL;
        int rc;
        u32 vp_id;

        pr_devel("native_connect_vcpu(server=%d)\n", server_num);

        if (dev->ops != &kvm_xive_native_ops) {
                pr_devel("Wrong ops !\n");
                return -EPERM;
        }
        if (xive->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
                return -EBUSY;

        mutex_lock(&xive->lock);

        rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
        if (rc)
                goto bail;

        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
        if (!xc) {
                rc = -ENOMEM;
                goto bail;
        }

        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = server_num;
        xc->vp_id = vp_id;
        xc->valid = true;
        vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;

        rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
        if (rc) {
                pr_err("Failed to get VP info from OPAL: %d\n", rc);
                goto bail;
        }

        if (!kvmppc_xive_check_save_restore(vcpu)) {
                pr_err("inconsistent save-restore setup for VCPU %d\n", server_num);
                rc = -EIO;
                goto bail;
        }

        /*
         * Enable the VP first as the single escalation mode will
         * affect escalation interrupts numbering
         */
        rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
        if (rc) {
                pr_err("Failed to enable VP in OPAL: %d\n", rc);
                goto bail;
        }

        /* Configure VCPU fields for use by assembly push/pull */
        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

        /* TODO: reset all queues to a clean state ? */
bail:
        mutex_unlock(&xive->lock);
        if (rc)
                kvmppc_xive_native_cleanup_vcpu(vcpu);

        return rc;
}

/*
 * Device passthrough support
 */
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;

        if (irq >= KVMPPC_XIVE_NR_IRQS)
                return -EINVAL;

        /*
         * Clear the ESB pages of the IRQ number being mapped (or
         * unmapped) into the guest and let the VM fault handler
         * repopulate with the appropriate ESB pages (device or IC)
         */
        pr_debug("clearing esb pages for girq 0x%lx\n", irq);
        mutex_lock(&xive->mapping_lock);
        if (xive->mapping)
                unmap_mapping_range(xive->mapping,
                                    esb_pgoff << PAGE_SHIFT,
                                    2ull << PAGE_SHIFT, 1);
        mutex_unlock(&xive->mapping_lock);
        return 0;
}

static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
        .reset_mapped = kvmppc_xive_native_reset_mapped,
};

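/*
 * Fault handler for the ESB mapping: resolve the guest interrupt
 * number from the page offset, pick the trigger or EOI page of the
 * selected hardware source and insert its PFN on demand.
 */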
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct kvm_device *dev = vma->vm_file->private_data;
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct xive_irq_data *xd;
        u32 hw_num;
        u16 src;
        u64 page;
        unsigned long irq;
        u64 page_offset;

        /*
         * Linux/KVM uses a two-page ESB setting, one page for trigger
         * and one for EOI
         */
        page_offset = vmf->pgoff - vma->vm_pgoff;
        irq = page_offset / 2;

        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel("%s: source %lx not found !\n", __func__, irq);
                return VM_FAULT_SIGBUS;
        }

        state = &sb->irq_state[src];

        /* Some sanity checking */
        if (!state->valid) {
                pr_devel("%s: source %lx invalid !\n", __func__, irq);
                return VM_FAULT_SIGBUS;
        }

        kvmppc_xive_select_irq(state, &hw_num, &xd);

        arch_spin_lock(&sb->lock);

        /*
         * first/even page is for trigger
         * second/odd page is for EOI and management.
         */
        page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
        arch_spin_unlock(&sb->lock);

        if (WARN_ON(!page)) {
                pr_err("%s: accessing invalid ESB page for source %lx !\n",
                       __func__, irq);
                return VM_FAULT_SIGBUS;
        }

        vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
        .fault = xive_native_esb_fault,
};

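/*
 * Fault handler for the TIMA mapping. The thread interrupt management
 * area has four views (HW, HV, OS, USER); only the OS view is exposed
 * to the guest, the other pages fault with SIGBUS.
 */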
static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;

        switch (vmf->pgoff - vma->vm_pgoff) {
        case 0: /* HW - forbid access */
        case 1: /* HV - forbid access */
                return VM_FAULT_SIGBUS;
        case 2: /* OS */
                vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
                return VM_FAULT_NOPAGE;
        case 3: /* USER - TODO */
        default:
                return VM_FAULT_SIGBUS;
        }
}

static const struct vm_operations_struct xive_native_tima_vmops = {
        .fault = xive_native_tima_fault,
};

static int kvmppc_xive_native_mmap(struct kvm_device *dev,
                                   struct vm_area_struct *vma)
{
        struct kvmppc_xive *xive = dev->private;

        /* We only allow mappings at fixed offset for now */
        if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
                if (vma_pages(vma) > 4)
                        return -EINVAL;
                vma->vm_ops = &xive_native_tima_vmops;
        } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
                if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
                        return -EINVAL;
                vma->vm_ops = &xive_native_esb_vmops;
        } else {
                return -EINVAL;
        }

        vm_flags_set(vma, VM_IO | VM_PFNMAP);
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

        /*
         * Grab the KVM device file address_space to be able to clear
         * the ESB pages mapping when a device is passed-through into
         * the guest.
         */
        xive->mapping = vma->vm_file->f_mapping;
        return 0;
}

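/*
 * KVM_DEV_XIVE_GRP_SOURCE: create or update a guest interrupt source.
 * The backing IPI is allocated on first use and the source starts in
 * the masked state (PQ set to '01').
 */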
static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
                                         u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u64 val;
        u16 idx;
        int rc;

        pr_devel("%s irq=0x%lx\n", __func__, irq);

        if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
                return -E2BIG;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb) {
                pr_debug("No source, creating source block...\n");
                sb = kvmppc_xive_create_src_block(xive, irq);
                if (!sb) {
                        pr_err("Failed to create block...\n");
                        return -ENOMEM;
                }
        }
        state = &sb->irq_state[idx];

        if (get_user(val, ubufp)) {
                pr_err("fault getting user info !\n");
                return -EFAULT;
        }

        arch_spin_lock(&sb->lock);

        /*
         * If the source doesn't already have an IPI, allocate
         * one and get the corresponding data
         */
        if (!state->ipi_number) {
                state->ipi_number = xive_native_alloc_irq();
                if (state->ipi_number == 0) {
                        pr_err("Failed to allocate IRQ !\n");
                        rc = -ENXIO;
                        goto unlock;
                }
                xive_native_populate_irq_data(state->ipi_number,
                                              &state->ipi_data);
                pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
                         state->ipi_number, irq);
        }

        /* Restore LSI state */
        if (val & KVM_XIVE_LEVEL_SENSITIVE) {
                state->lsi = true;
                if (val & KVM_XIVE_LEVEL_ASSERTED)
                        state->asserted = true;
                pr_devel(" LSI ! Asserted=%d\n", state->asserted);
        }

        /* Mask IRQ to start with */
        state->act_server = 0;
        state->act_priority = MASKED;
        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

        /* Increment the number of valid sources and mark this one valid */
        if (!state->valid)
                xive->src_count++;
        state->valid = true;

        rc = 0;

unlock:
        arch_spin_unlock(&sb->lock);

        return rc;
}

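/*
 * Route a source to the requested server/priority, or mask it by
 * clearing its routing when the priority is MASKED or the masked flag
 * is set.
 */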
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
                                                   struct kvmppc_xive_src_block *sb,
                                                   struct kvmppc_xive_irq_state *state,
                                                   u32 server, u8 priority, bool masked,
                                                   u32 eisn)
{
        struct kvm *kvm = xive->kvm;
        u32 hw_num;
        int rc = 0;

        arch_spin_lock(&sb->lock);

        if (state->act_server == server && state->act_priority == priority &&
            state->eisn == eisn)
                goto unlock;

        pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
                 priority, server, masked, state->act_server,
                 state->act_priority);

        kvmppc_xive_select_irq(state, &hw_num, NULL);

        if (priority != MASKED && !masked) {
                rc = kvmppc_xive_select_target(kvm, &server, priority);
                if (rc)
                        goto unlock;

                state->act_priority = priority;
                state->act_server = server;
                state->eisn = eisn;

                rc = xive_native_configure_irq(hw_num,
                                               kvmppc_xive_vp(xive, server),
                                               priority, eisn);
        } else {
                state->act_priority = MASKED;
                state->act_server = 0;
                state->eisn = 0;

                rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
        }

unlock:
        arch_spin_unlock(&sb->lock);

        return rc;
}

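/*
 * KVM_DEV_XIVE_GRP_SOURCE_CONFIG: decode the 64-bit configuration word
 * (EISN, masked flag, server and priority fields) provided by
 * userspace and apply it to the source.
 */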
static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
                                                long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 src;
        u64 kvm_cfg;
        u32 server;
        u8 priority;
        bool masked;
        u32 eisn;

        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb)
                return -ENOENT;

        state = &sb->irq_state[src];

        if (!state->valid)
                return -EINVAL;

        if (get_user(kvm_cfg, ubufp))
                return -EFAULT;

        pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);

        priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
                KVM_XIVE_SOURCE_PRIORITY_SHIFT;
        server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
                KVM_XIVE_SOURCE_SERVER_SHIFT;
        masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
                KVM_XIVE_SOURCE_MASKED_SHIFT;
        eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
                KVM_XIVE_SOURCE_EISN_SHIFT;

        if (priority != xive_prio_from_guest(priority)) {
                pr_err("invalid priority for queue %d for VCPU %d\n",
                       priority, server);
                return -EINVAL;
        }

        return kvmppc_xive_native_update_source_config(xive, sb, state, server,
                                                       priority, masked, eisn);
}

static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
                                          long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct xive_irq_data *xd;
        u32 hw_num;
        u16 src;
        int rc = 0;

        pr_devel("%s irq=0x%lx", __func__, irq);

        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb)
                return -ENOENT;

        state = &sb->irq_state[src];

        rc = -EINVAL;

        arch_spin_lock(&sb->lock);

        if (state->valid) {
                kvmppc_xive_select_irq(state, &hw_num, &xd);
                xive_native_sync_source(hw_num);
                rc = 0;
        }

        arch_spin_unlock(&sb->lock);
        return rc;
}

static int xive_native_validate_queue_size(u32 qshift)
{
        /*
         * We only support 64K pages for the moment. This is also
         * advertised in the DT property "ibm,xive-eq-sizes"
         */
        switch (qshift) {
        case 0: /* EQ reset */
        case 16:
                return 0;
        case 12:
        case 21:
        case 24:
        default:
                return -EINVAL;
        }
}

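/*
 * KVM_DEV_XIVE_GRP_EQ_CONFIG (set): configure an event queue for a
 * server/priority pair from a struct kvm_ppc_xive_eq. A queue shift of
 * 0 resets the queue and disables queueing for that priority.
 */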
static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
                                               long eq_idx, u64 addr)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        struct kvmppc_xive_vcpu *xc;
        void __user *ubufp = (void __user *) addr;
        u32 server;
        u8 priority;
        struct kvm_ppc_xive_eq kvm_eq;
        int rc;
        __be32 *qaddr = 0;
        struct page *page;
        struct xive_q *q;
        gfn_t gfn;
        unsigned long page_size;
        int srcu_idx;

        /*
         * Demangle priority/server tuple from the EQ identifier
         */
        priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
                KVM_XIVE_EQ_PRIORITY_SHIFT;
        server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
                KVM_XIVE_EQ_SERVER_SHIFT;

        if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
                return -EFAULT;

        vcpu = kvmppc_xive_find_server(kvm, server);
        if (!vcpu) {
                pr_err("Can't find server %d\n", server);
                return -ENOENT;
        }
        xc = vcpu->arch.xive_vcpu;

        if (priority != xive_prio_from_guest(priority)) {
                pr_err("Trying to restore invalid queue %d for VCPU %d\n",
                       priority, server);
                return -EINVAL;
        }
        q = &xc->queues[priority];

        pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
                 __func__, server, priority, kvm_eq.flags,
                 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

        /* reset queue and disable queueing */
        if (!kvm_eq.qshift) {
                q->guest_qaddr = 0;
                q->guest_qshift = 0;

                rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
                                                        NULL, 0, true);
                if (rc) {
                        pr_err("Failed to reset queue %d for VCPU %d: %d\n",
                               priority, xc->server_num, rc);
                        return rc;
                }

                return 0;
        }

        /*
         * sPAPR specifies an "Unconditional Notify (n) flag" for the
         * H_INT_SET_QUEUE_CONFIG hcall which forces notification
         * without using the coalescing mechanisms provided by the
         * XIVE END ESBs. This is required on KVM as notification
         * using the END ESBs is not supported.
         */
        if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
                pr_err("invalid flags %d\n", kvm_eq.flags);
                return -EINVAL;
        }

        rc = xive_native_validate_queue_size(kvm_eq.qshift);
        if (rc) {
                pr_err("invalid queue size %d\n", kvm_eq.qshift);
                return rc;
        }

        if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
                pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
                       1ull << kvm_eq.qshift);
                return -EINVAL;
        }

        srcu_idx = srcu_read_lock(&kvm->srcu);
        gfn = gpa_to_gfn(kvm_eq.qaddr);

        page_size = kvm_host_page_size(vcpu, gfn);
        if (1ull << kvm_eq.qshift > page_size) {
                srcu_read_unlock(&kvm->srcu, srcu_idx);
                pr_warn("Incompatible host page size %lx!\n", page_size);
                return -EINVAL;
        }

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                srcu_read_unlock(&kvm->srcu, srcu_idx);
                pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
                return -EINVAL;
        }

        qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        /*
         * Back up the queue page guest address so the EQ page can be
         * marked dirty for migration.
         */
        q->guest_qaddr = kvm_eq.qaddr;
        q->guest_qshift = kvm_eq.qshift;

        /*
         * Unconditional Notification is forced by default at the
         * OPAL level because the use of END ESBs is not supported by
         * Linux.
         */
        rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
                                                (__be32 *) qaddr, kvm_eq.qshift, true);
        if (rc) {
                pr_err("Failed to configure queue %d for VCPU %d: %d\n",
                       priority, xc->server_num, rc);
                put_page(page);
                return rc;
        }

        /*
         * Only restore the queue state when needed. When doing the
         * H_INT_SET_SOURCE_CONFIG hcall, it should not.
         */
        if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
                rc = xive_native_set_queue_state(xc->vp_id, priority,
                                                 kvm_eq.qtoggle,
                                                 kvm_eq.qindex);
                if (rc)
                        goto error;
        }

        rc = kvmppc_xive_attach_escalation(vcpu, priority,
                                           kvmppc_xive_has_single_escalation(xive));
error:
        if (rc)
                kvmppc_xive_native_cleanup_queue(vcpu, priority);
        return rc;
}

static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
                                               long eq_idx, u64 addr)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        struct kvmppc_xive_vcpu *xc;
        struct xive_q *q;
        void __user *ubufp = (u64 __user *) addr;
        u32 server;
        u8 priority;
        struct kvm_ppc_xive_eq kvm_eq;
        u64 qaddr;
        u64 qshift;
        u64 qeoi_page;
        u32 escalate_irq;
        u64 qflags;
        int rc;

        /*
         * Demangle priority/server tuple from the EQ identifier
         */
        priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
                KVM_XIVE_EQ_PRIORITY_SHIFT;
        server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
                KVM_XIVE_EQ_SERVER_SHIFT;

        vcpu = kvmppc_xive_find_server(kvm, server);
        if (!vcpu) {
                pr_err("Can't find server %d\n", server);
                return -ENOENT;
        }
        xc = vcpu->arch.xive_vcpu;

        if (priority != xive_prio_from_guest(priority)) {
                pr_err("invalid priority for queue %d for VCPU %d\n",
                       priority, server);
                return -EINVAL;
        }
        q = &xc->queues[priority];

        memset(&kvm_eq, 0, sizeof(kvm_eq));

        if (!q->qpage)
                return 0;

        rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
                                        &qeoi_page, &escalate_irq, &qflags);
        if (rc)
                return rc;

        kvm_eq.flags = 0;
        if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
                kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;

        kvm_eq.qshift = q->guest_qshift;
        kvm_eq.qaddr = q->guest_qaddr;

        rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
                                         &kvm_eq.qindex);
        if (rc)
                return rc;

        pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
                 __func__, server, priority, kvm_eq.flags,
                 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

        if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
                return -EFAULT;

        return 0;
}

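/*
 * Return every valid source of a block to the masked state and clear
 * its routing, including the pass-through HW source if one is mapped.
 */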
static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
{
        int i;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

                if (!state->valid)
                        continue;

                if (state->act_priority == MASKED)
                        continue;

                state->eisn = 0;
                state->act_server = 0;
                state->act_priority = MASKED;

                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
                xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

                if (state->pt_number) {
                        xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
                        xive_native_configure_irq(state->pt_number,
                                                  0, MASKED, 0);
                }
        }
}

static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        pr_devel("%s\n", __func__);

        mutex_lock(&xive->lock);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
                unsigned int prio;

                if (!xc)
                        continue;

                kvmppc_xive_disable_vcpu_interrupts(vcpu);

                for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

                        /* Single escalation, no queue 7 */
                        if (prio == 7 && kvmppc_xive_has_single_escalation(xive))
                                break;

                        if (xc->esc_virq[prio]) {
                                free_irq(xc->esc_virq[prio], vcpu);
                                irq_dispose_mapping(xc->esc_virq[prio]);
                                kfree(xc->esc_virq_names[prio]);
                                xc->esc_virq[prio] = 0;
                        }

                        kvmppc_xive_native_cleanup_queue(vcpu, prio);
                }
        }

        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (sb) {
                        arch_spin_lock(&sb->lock);
                        kvmppc_xive_reset_sources(sb);
                        arch_spin_unlock(&sb->lock);
                }
        }

        mutex_unlock(&xive->lock);

        return 0;
}

static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
{
        int j;

        for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
                struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
                struct xive_irq_data *xd;
                u32 hw_num;

                if (!state->valid)
                        continue;

                /*
                 * The struct kvmppc_xive_irq_state reflects the state
                 * of the EAS configuration and not the state of the
                 * source. The source is masked by setting the PQ bits
                 * to '-Q', which is what is being done before calling
                 * the KVM_DEV_XIVE_EQ_SYNC control.
                 *
                 * If a source EAS is configured, OPAL syncs the XIVE
                 * IC of the source and the XIVE IC of the previous
                 * target if any.
                 *
                 * So it should be fine ignoring MASKED sources as
                 * they have been synced already.
                 */
                if (state->act_priority == MASKED)
                        continue;

                kvmppc_xive_select_irq(state, &hw_num, &xd);
                xive_native_sync_source(hw_num);
                xive_native_sync_queue(hw_num);
        }
}

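/*
 * Mark the guest page backing each in-use event queue dirty so that
 * the queue contents are transferred on migration.
 */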
static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        unsigned int prio;
        int srcu_idx;

        if (!xc)
                return -ENOENT;

        for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
                struct xive_q *q = &xc->queues[prio];

                if (!q->qpage)
                        continue;

                /* Mark EQ page dirty for migration */
                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        }
        return 0;
}

static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        pr_devel("%s\n", __func__);

        mutex_lock(&xive->lock);
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (sb) {
                        arch_spin_lock(&sb->lock);
                        kvmppc_xive_native_sync_sources(sb);
                        arch_spin_unlock(&sb->lock);
                }
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvmppc_xive_native_vcpu_eq_sync(vcpu);
        }
        mutex_unlock(&xive->lock);

        return 0;
}

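/*
 * KVM_SET_DEVICE_ATTR dispatch: the attribute group selects the
 * handler (device controls, source creation, source and EQ
 * configuration, source sync).
 */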
static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
                                       struct kvm_device_attr *attr)
{
        struct kvmppc_xive *xive = dev->private;

        switch (attr->group) {
        case KVM_DEV_XIVE_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_XIVE_RESET:
                        return kvmppc_xive_reset(xive);
                case KVM_DEV_XIVE_EQ_SYNC:
                        return kvmppc_xive_native_eq_sync(xive);
                case KVM_DEV_XIVE_NR_SERVERS:
                        return kvmppc_xive_set_nr_servers(xive, attr->addr);
                }
                break;
        case KVM_DEV_XIVE_GRP_SOURCE:
                return kvmppc_xive_native_set_source(xive, attr->attr,
                                                     attr->addr);
        case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
                return kvmppc_xive_native_set_source_config(xive, attr->attr,
                                                            attr->addr);
        case KVM_DEV_XIVE_GRP_EQ_CONFIG:
                return kvmppc_xive_native_set_queue_config(xive, attr->attr,
                                                           attr->addr);
        case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
                return kvmppc_xive_native_sync_source(xive, attr->attr,
                                                      attr->addr);
        }
        return -ENXIO;
}

static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
                                       struct kvm_device_attr *attr)
{
        struct kvmppc_xive *xive = dev->private;

        switch (attr->group) {
        case KVM_DEV_XIVE_GRP_EQ_CONFIG:
                return kvmppc_xive_native_get_queue_config(xive, attr->attr,
                                                           attr->addr);
        }
        return -ENXIO;
}

static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
                                       struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_XIVE_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_XIVE_RESET:
                case KVM_DEV_XIVE_EQ_SYNC:
                case KVM_DEV_XIVE_NR_SERVERS:
                        return 0;
                }
                break;
        case KVM_DEV_XIVE_GRP_SOURCE:
        case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
        case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
                if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
                    attr->attr < KVMPPC_XIVE_NR_IRQS)
                        return 0;
                break;
        case KVM_DEV_XIVE_GRP_EQ_CONFIG:
                return 0;
        }
        return -ENXIO;
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        pr_devel("Releasing xive native device\n");

        /*
         * Clear the KVM device file address_space which is used to
         * unmap the ESB pages when a device is passed-through.
         */
        mutex_lock(&xive->mapping_lock);
        xive->mapping = NULL;
        mutex_unlock(&xive->mapping_lock);

        /*
         * Since this is the device release function, we know that
         * userspace does not have any open fd or mmap referring to
         * the device. Therefore none of the device attribute
         * set/get, mmap, or page fault functions can be executing
         * concurrently, and similarly, the connect_vcpu and
         * set/clr_mapped functions cannot be executing either.
         */

        debugfs_remove(xive->dentry);

        /*
         * We should clean up the vCPU interrupt presenters first.
         */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                /*
                 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
                 * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
                 * Holding the vcpu->mutex also means that the vcpu cannot
                 * be executing the KVM_RUN ioctl, and therefore it cannot
                 * be executing the XIVE push or pull code or accessing
                 * the XIVE MMIO regions.
                 */
                mutex_lock(&vcpu->mutex);
                kvmppc_xive_native_cleanup_vcpu(vcpu);
                mutex_unlock(&vcpu->mutex);
        }

        /*
         * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
         * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
         * against xive code getting called during vcpu execution or
         * set/get one_reg operations.
         */
        kvm->arch.xive = NULL;

        for (i = 0; i <= xive->max_sbid; i++) {
                if (xive->src_blocks[i])
                        kvmppc_xive_free_sources(xive->src_blocks[i]);
                kfree(xive->src_blocks[i]);
                xive->src_blocks[i] = NULL;
        }

        if (xive->vp_base != XIVE_INVALID_VP)
                xive_native_free_vp_block(xive->vp_base);

        /*
         * A reference of the kvmppc_xive pointer is now kept under
         * the xive_devices struct of the machine for reuse. It is
         * freed when the VM is destroyed for now until we fix all the
         * execution paths.
         */
        kfree(dev);
}

/*
 * Create a XIVE device. kvm->lock is held.
 */
static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
        struct kvmppc_xive *xive;
        struct kvm *kvm = dev->kvm;

        pr_devel("Creating xive native device\n");

        if (kvm->arch.xive)
                return -EEXIST;

        xive = kvmppc_xive_get_device(kvm, type);
        if (!xive)
                return -ENOMEM;

        dev->private = xive;
        xive->dev = dev;
        xive->kvm = kvm;
        mutex_init(&xive->mapping_lock);
        mutex_init(&xive->lock);

        /* VP allocation is delayed to the first call to connect_vcpu */
        xive->vp_base = XIVE_INVALID_VP;
        /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
         * on a POWER9 system.
         */
        xive->nr_servers = KVM_MAX_VCPUS;

        if (xive_native_has_single_escalation())
                xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;

        if (xive_native_has_save_restore())
                xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;

        xive->ops = &kvmppc_xive_native_ops;

        kvm->arch.xive = xive;
        return 0;
}

/*
 * Interrupt Pending Buffer (IPB) offset
 */
#define TM_IPB_SHIFT 40
#define TM_IPB_MASK (((u64) 0xFF) << TM_IPB_SHIFT)

int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u64 opal_state;
        int rc;

        if (!kvmppc_xive_enabled(vcpu))
                return -EPERM;

        if (!xc)
                return -ENOENT;

        /* Thread context registers. We only care about IPB and CPPR */
        val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;

        /* Get the VP state from OPAL */
        rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
        if (rc)
                return rc;

        /*
         * Capture the backup of IPB register in the NVT structure and
         * merge it in our KVM VP state.
         */
        val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);

        pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
                 __func__,
                 vcpu->arch.xive_saved_state.nsr,
                 vcpu->arch.xive_saved_state.cppr,
                 vcpu->arch.xive_saved_state.ipb,
                 vcpu->arch.xive_saved_state.pipr,
                 vcpu->arch.xive_saved_state.w01,
                 (u32) vcpu->arch.xive_cam_word, opal_state);

        return 0;
}

int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

        pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
                 val->xive_timaval[0], val->xive_timaval[1]);

        if (!kvmppc_xive_enabled(vcpu))
                return -EPERM;

        if (!xc || !xive)
                return -ENOENT;

        /* We can't update the state of a "pushed" VCPU */
        if (WARN_ON(vcpu->arch.xive_pushed))
                return -EBUSY;

        /*
         * Restore the thread context registers. IPB and CPPR should
         * be the only ones that matter.
         */
        vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];

        /*
         * There is no need to restore the XIVE internal state (IPB
         * stored in the NVT) as the IPB register was merged in KVM VP
         * state when captured.
         */
        return 0;
}

bool kvmppc_xive_native_supported(void)
{
        return xive_native_has_queue_state_support();
}

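/* debugfs dump of the vCPU thread contexts and the configured sources */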
static int xive_native_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xive *xive = m->private;
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        if (!kvm)
                return 0;

        seq_puts(m, "=========\nVCPU state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

                if (!xc)
                        continue;

                seq_printf(m, "VCPU %d: VP=%#x/%02x\n"
                           " NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
                           xc->server_num, xc->vp_id, xc->vp_chip_id,
                           vcpu->arch.xive_saved_state.nsr,
                           vcpu->arch.xive_saved_state.cppr,
                           vcpu->arch.xive_saved_state.ipb,
                           vcpu->arch.xive_saved_state.pipr,
                           be64_to_cpu(vcpu->arch.xive_saved_state.w01),
                           be32_to_cpu(vcpu->arch.xive_cam_word));

                kvmppc_xive_debug_show_queues(m, vcpu);
        }

        seq_puts(m, "=========\nSources\n=========\n");

        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (sb) {
                        arch_spin_lock(&sb->lock);
                        kvmppc_xive_debug_show_sources(m, sb);
                        arch_spin_unlock(&sb->lock);
                }
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_native_debug);

static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
        xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry,
                                           xive, &xive_native_debug_fops);
        pr_debug("%s: created\n", __func__);
}

static void kvmppc_xive_native_init(struct kvm_device *dev)
{
        struct kvmppc_xive *xive = dev->private;

        /* Register some debug interfaces */
        xive_native_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_native_ops = {
        .name = "kvm-xive-native",
        .create = kvmppc_xive_native_create,
        .init = kvmppc_xive_native_init,
        .release = kvmppc_xive_native_release,
        .set_attr = kvmppc_xive_native_set_attr,
        .get_attr = kvmppc_xive_native_get_attr,
        .has_attr = kvmppc_xive_native_has_attr,
        .mmap = kvmppc_xive_native_mmap,
};