// SPDX-License-Identifier: GPL-2.0
/*
 * s390 kvm PCI passthrough support
 *
 * Copyright IBM Corp. 2022
 *
 * Author(s): Matthew Rosato <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <asm/pci.h>
#include <asm/pci_insn.h>
#include <asm/pci_io.h>
#include <asm/sclp.h>
#include "pci.h"
#include "kvm-s390.h"

struct zpci_aift *aift;
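
/* Issue a SIC (set interruption controls) operation with an empty IIB */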
static inline int __set_irq_noiib(u16 ctl, u8 isc)
{
        union zpci_sic_iib iib = {{0}};

        return zpci_set_irq_ctrl(ctl, isc, &iib);
}
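
/*
 * Tear down the KVM view of adapter event notification forwarding: clear
 * the AIFT contents under the gait_lock and free the kzdev lookup array.
 * Called with the aift_lock held.
 */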
void kvm_s390_pci_aen_exit(void)
{
        unsigned long flags;
        struct kvm_zdev **gait_kzdev;

        lockdep_assert_held(&aift->aift_lock);

        /*
         * Contents of the aipb remain registered for the life of the host
         * kernel, with the information preserved in zpci_aipb and
         * zpci_aif_sbv in case the KVM module is inserted again later.
         * Clear the AIFT information and free anything not registered with
         * the underlying firmware.
         */
        spin_lock_irqsave(&aift->gait_lock, flags);
        gait_kzdev = aift->kzdev;
        aift->gait = NULL;
        aift->sbv = NULL;
        aift->kzdev = NULL;
        spin_unlock_irqrestore(&aift->gait_lock, flags);

        kfree(gait_kzdev);
}
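
/*
 * First-time AEN setup: allocate the AIPB, the forwarding summary bit
 * vector and the GAIT, then register them with the underlying firmware
 * via a SIC with the SIC_SET_AENI_CONTROLS control.
 */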
static int zpci_setup_aipb(u8 nisc)
{
        struct page *page;
        int size, rc;

        zpci_aipb = kzalloc(sizeof(union zpci_sic_iib), GFP_KERNEL);
        if (!zpci_aipb)
                return -ENOMEM;

        aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
        if (!aift->sbv) {
                rc = -ENOMEM;
                goto free_aipb;
        }
        zpci_aif_sbv = aift->sbv;
        size = get_order(PAGE_ALIGN(ZPCI_NR_DEVICES *
                                    sizeof(struct zpci_gaite)));
        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, size);
        if (!page) {
                rc = -ENOMEM;
                goto free_sbv;
        }
        aift->gait = (struct zpci_gaite *)page_to_virt(page);

        zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
        zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
        zpci_aipb->aipb.afi = nisc;
        zpci_aipb->aipb.faal = ZPCI_NR_DEVICES;

        /* Setup Adapter Event Notification Interpretation */
        if (zpci_set_irq_ctrl(SIC_SET_AENI_CONTROLS, 0, zpci_aipb)) {
                rc = -EIO;
                goto free_gait;
        }

        return 0;

free_gait:
        free_pages((unsigned long)aift->gait, size);
free_sbv:
        airq_iv_release(aift->sbv);
        zpci_aif_sbv = NULL;
free_aipb:
        kfree(zpci_aipb);
        zpci_aipb = NULL;

        return rc;
}

static int zpci_reset_aipb(u8 nisc)
{
        /*
         * AEN registration can only happen once per system boot. If
         * an aipb already exists then AEN was already registered and
         * we can re-use the aipb contents. This can only happen if
         * the KVM module was removed and re-inserted. However, we must
         * ensure that the same forwarding ISC is used, as this is assigned
         * during KVM module load.
         */
        if (zpci_aipb->aipb.afi != nisc)
                return -EINVAL;

        aift->sbv = zpci_aif_sbv;
        /* aipb.gait holds a physical address; convert it back for host use */
        aift->gait = phys_to_virt(zpci_aipb->aipb.gait);

        return 0;
}
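
/*
 * Enable adapter event notification forwarding for the given forwarding
 * ISC: allocate the summary-bit-to-kzdev lookup array, set up (or re-use)
 * the AIPB, and enable floating IRQs.
 */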
int kvm_s390_pci_aen_init(u8 nisc)
{
        int rc = 0;

        /* If already enabled for AEN, bail out now */
        if (aift->gait || aift->sbv)
                return -EPERM;

        mutex_lock(&aift->aift_lock);
        aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev *),
                              GFP_KERNEL);
        if (!aift->kzdev) {
                rc = -ENOMEM;
                goto unlock;
        }

        if (!zpci_aipb)
                rc = zpci_setup_aipb(nisc);
        else
                rc = zpci_reset_aipb(nisc);
        if (rc)
                goto free_zdev;

        /* Enable floating IRQs */
        if (__set_irq_noiib(SIC_IRQ_MODE_SINGLE, nisc)) {
                rc = -EIO;
                kvm_s390_pci_aen_exit();
        }
        goto unlock;

free_zdev:
        kfree(aift->kzdev);
unlock:
        mutex_unlock(&aift->aift_lock);
        return rc;
}

/* Modify PCI: Register floating adapter interruption forwarding */
static int kvm_zpci_set_airq(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
        struct zpci_fib fib = {};
        u8 status;

        fib.fmt0.isc = zdev->kzdev->fib.fmt0.isc;
        fib.fmt0.sum = 1;       /* enable summary notifications */
        fib.fmt0.noi = airq_iv_end(zdev->aibv);
        fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
        fib.fmt0.aibvo = 0;
        fib.fmt0.aisb = virt_to_phys(aift->sbv->vector + (zdev->aisb / 64) * 8);
        fib.fmt0.aisbo = zdev->aisb & 63;
        fib.gd = zdev->gisa;

        return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister floating adapter interruption forwarding */
static int kvm_zpci_clear_airq(struct zpci_dev *zdev)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
        struct zpci_fib fib = {};
        u8 cc, status;

        fib.gd = zdev->gisa;
        cc = zpci_mod_fc(req, &fib, &status);
        if (cc == 3 || (cc == 1 && status == 24))
                /* Function already gone or IRQs already deregistered. */
                cc = 0;

        return cc ? -EIO : 0;
}
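
/* Uncharge nr_pages from the user's locked_vm and the mm's pinned_vm */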
static inline void unaccount_mem(unsigned long nr_pages)
{
        struct user_struct *user = get_uid(current_user());

        if (user)
                atomic_long_sub(nr_pages, &user->locked_vm);
        if (current->mm)
                atomic64_sub(nr_pages, &current->mm->pinned_vm);
}
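
/*
 * Charge nr_pages against the invoking user's RLIMIT_MEMLOCK, failing with
 * -ENOMEM if the limit would be exceeded, and add them to the mm's pinned_vm.
 */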
static inline int account_mem(unsigned long nr_pages)
{
        struct user_struct *user = get_uid(current_user());
        unsigned long page_limit, cur_pages, new_pages;

        page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        do {
                cur_pages = atomic_long_read(&user->locked_vm);
                new_pages = cur_pages + nr_pages;
                if (new_pages > page_limit)
                        return -ENOMEM;
        } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
                                     new_pages) != cur_pages);

        atomic64_add(nr_pages, &current->mm->pinned_vm);

        return 0;
}
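
/*
 * Enable adapter interruption forwarding for an interpreted device: pin the
 * guest AIBV (and AISB, if one was specified), allocate a summary bit and a
 * host interrupt vector, fill in the GAITE, and register the rewritten FIB
 * with firmware.
 */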
static int kvm_s390_pci_aif_enable(struct zpci_dev *zdev, struct zpci_fib *fib,
                                   bool assist)
{
        struct page *pages[1], *aibv_page, *aisb_page = NULL;
        unsigned int msi_vecs, idx;
        struct zpci_gaite *gaite;
        unsigned long hva, bit;
        struct kvm *kvm;
        phys_addr_t gaddr;
        int rc = 0, gisc, npages, pcount = 0;

        /*
         * Interrupt forwarding is only applicable if the device is already
         * enabled for interpretation
         */
        if (zdev->gisa == 0)
                return -EINVAL;

        kvm = zdev->kzdev->kvm;
        msi_vecs = min_t(unsigned int, fib->fmt0.noi, zdev->max_msi);

        /* Get the associated forwarding ISC - if invalid, return the error */
        gisc = kvm_s390_gisc_register(kvm, fib->fmt0.isc);
        if (gisc < 0)
                return gisc;

        /* Replace AIBV address */
        idx = srcu_read_lock(&kvm->srcu);
        hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aibv));
        npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, pages);
        srcu_read_unlock(&kvm->srcu, idx);
        if (npages < 1) {
                rc = -EIO;
                goto out;
        }
        aibv_page = pages[0];
        pcount++;
        gaddr = page_to_phys(aibv_page) + (fib->fmt0.aibv & ~PAGE_MASK);
        fib->fmt0.aibv = gaddr;

        /* Pin the guest AISB if one was specified */
        if (fib->fmt0.sum == 1) {
                idx = srcu_read_lock(&kvm->srcu);
                hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aisb));
                npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM,
                                             pages);
                srcu_read_unlock(&kvm->srcu, idx);
                if (npages < 1) {
                        rc = -EIO;
                        goto unpin1;
                }
                aisb_page = pages[0];
                pcount++;
        }

        /* Account for pinned pages, roll back on failure */
        rc = account_mem(pcount);
        if (rc)
                goto unpin2;

        /* AISB must be allocated before we can fill in GAITE */
        mutex_lock(&aift->aift_lock);
        bit = airq_iv_alloc_bit(aift->sbv);
        if (bit == -1UL) {
                rc = -ENOSPC;
                goto unlock;
        }
        zdev->aisb = bit;       /* store the summary bit number */
        zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA |
                                    AIRQ_IV_BITLOCK |
                                    AIRQ_IV_GUESTVEC,
                                    phys_to_virt(fib->fmt0.aibv));

        spin_lock_irq(&aift->gait_lock);
        gaite = &aift->gait[zdev->aisb];

        /* If assist not requested, host will get all alerts */
        if (assist)
                gaite->gisa = (u32)virt_to_phys(&kvm->arch.sie_page2->gisa);
        else
                gaite->gisa = 0;

        gaite->gisc = fib->fmt0.isc;
        gaite->count++;
        gaite->aisbo = fib->fmt0.aisbo;
        /* Only record a host AISB address if the guest specified an AISB */
        if (fib->fmt0.sum == 1)
                gaite->aisb = virt_to_phys(page_address(aisb_page) +
                                           (fib->fmt0.aisb & ~PAGE_MASK));
        else
                gaite->aisb = 0;
        aift->kzdev[zdev->aisb] = zdev->kzdev;
        spin_unlock_irq(&aift->gait_lock);

        /* Update guest FIB for re-issue */
        fib->fmt0.aisbo = zdev->aisb & 63;
        fib->fmt0.aisb = virt_to_phys(aift->sbv->vector + (zdev->aisb / 64) * 8);
        fib->fmt0.isc = gisc;

        /* Save some guest fib values in the host for later use */
        zdev->kzdev->fib.fmt0.isc = fib->fmt0.isc;
        zdev->kzdev->fib.fmt0.aibv = fib->fmt0.aibv;
        mutex_unlock(&aift->aift_lock);

        /* Issue the Modify PCI instruction to register the irq now */
        rc = kvm_zpci_set_airq(zdev);
        return rc;

unlock:
        mutex_unlock(&aift->aift_lock);
        unaccount_mem(pcount);
unpin2:
        if (fib->fmt0.sum == 1)
                unpin_user_page(aisb_page);
unpin1:
        unpin_user_page(aibv_page);
out:
        return rc;
}
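
/*
 * Disable adapter interruption forwarding: deregister the IRQs with
 * firmware and, once the GAITE use count drops to zero, clear the GAITE
 * and unpin and unaccount the guest AIBV/AISB pages.
 */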
static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
{
        struct kvm_zdev *kzdev = zdev->kzdev;
        struct zpci_gaite *gaite;
        struct page *vpage = NULL, *spage = NULL;
        int rc, pcount = 0;
        u8 isc;

        if (zdev->gisa == 0)
                return -EINVAL;

        mutex_lock(&aift->aift_lock);

        /*
         * If the clear fails due to an error, leave now unless we know this
         * device is about to go away (force); in that case, clear the GAITE
         * regardless.
         */
        rc = kvm_zpci_clear_airq(zdev);
        if (rc && !force)
                goto out;

        if (zdev->kzdev->fib.fmt0.aibv == 0)
                goto out;

        spin_lock_irq(&aift->gait_lock);
        gaite = &aift->gait[zdev->aisb];
        isc = gaite->gisc;
        gaite->count--;
        if (gaite->count == 0) {
                /* Release guest AIBV and AISB */
                vpage = phys_to_page(kzdev->fib.fmt0.aibv);
                if (gaite->aisb != 0)
                        spage = phys_to_page(gaite->aisb);
                /* Clear the GAIT entry */
                gaite->aisb = 0;
                gaite->gisc = 0;
                gaite->aisbo = 0;
                gaite->gisa = 0;
                aift->kzdev[zdev->aisb] = NULL;
                /* Clear zdev info */
                airq_iv_free_bit(aift->sbv, zdev->aisb);
                airq_iv_release(zdev->aibv);
                zdev->aisb = 0;
                zdev->aibv = NULL;
        }
        spin_unlock_irq(&aift->gait_lock);
        kvm_s390_gisc_unregister(kzdev->kvm, isc);
        kzdev->fib.fmt0.isc = 0;
        kzdev->fib.fmt0.aibv = 0;

        if (vpage) {
                unpin_user_page(vpage);
                pcount++;
        }
        if (spage) {
                unpin_user_page(spage);
                pcount++;
        }
        if (pcount > 0)
                unaccount_mem(pcount);
out:
        mutex_unlock(&aift->aift_lock);

        return rc;
}
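
/* Allocate the kvm_zdev structure that ties a zPCI device to a KVM */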
static int kvm_s390_pci_dev_open(struct zpci_dev *zdev)
{
        struct kvm_zdev *kzdev;

        kzdev = kzalloc(sizeof(struct kvm_zdev), GFP_KERNEL);
        if (!kzdev)
                return -ENOMEM;

        kzdev->zdev = zdev;
        zdev->kzdev = kzdev;

        return 0;
}
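
/* Sever the zPCI device's link to its kvm_zdev structure and free it */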
static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
{
        struct kvm_zdev *kzdev;

        kzdev = zdev->kzdev;
        WARN_ON(kzdev->zdev != zdev);
        zdev->kzdev = NULL;
        kfree(kzdev);
}

/*
 * Register device with the specified KVM. If interpretation facilities are
 * available, enable them and let userspace indicate whether or not they will
 * be used (specify SHM bit to disable).
 */
static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
        struct zpci_dev *zdev = opaque;
        int rc;

        if (!zdev)
                return -EINVAL;

        mutex_lock(&zdev->kzdev_lock);

        if (zdev->kzdev || zdev->gisa != 0 || !kvm) {
                mutex_unlock(&zdev->kzdev_lock);
                return -EINVAL;
        }

        kvm_get_kvm(kvm);

        mutex_lock(&kvm->lock);

        rc = kvm_s390_pci_dev_open(zdev);
        if (rc)
                goto err;

        /*
         * If interpretation facilities aren't available, add the device to
         * the kzdev list but don't enable for interpretation.
         */
        if (!kvm_s390_pci_interp_allowed())
                goto out;

        /*
         * If this is the first request to use an interpreted device, make the
         * necessary vcpu changes
         */
        if (!kvm->arch.use_zpci_interp)
                kvm_s390_vcpu_pci_enable_interp(kvm);

        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
                        goto err;
        }

        /*
         * Store information about the identity of the kvm guest allowed to
         * access this device via interpretation to be used by host CLP
         */
        zdev->gisa = (u32)virt_to_phys(&kvm->arch.sie_page2->gisa);

        rc = zpci_enable_device(zdev);
        if (rc)
                goto clear_gisa;

        /* Re-register the IOMMU that was already created */
        rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                virt_to_phys(zdev->dma_table));
        if (rc)
                goto clear_gisa;

out:
        zdev->kzdev->kvm = kvm;
        spin_lock(&kvm->arch.kzdev_list_lock);
        list_add_tail(&zdev->kzdev->entry, &kvm->arch.kzdev_list);
        spin_unlock(&kvm->arch.kzdev_list_lock);

        mutex_unlock(&kvm->lock);
        mutex_unlock(&zdev->kzdev_lock);
        return 0;

clear_gisa:
        zdev->gisa = 0;
err:
        if (zdev->kzdev)
                kvm_s390_pci_dev_release(zdev);
        mutex_unlock(&kvm->lock);
        mutex_unlock(&zdev->kzdev_lock);
        kvm_put_kvm(kvm);
        return rc;
}
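
/*
 * Undo kvm_s390_pci_register_kvm(): turn off interruption forwarding, clear
 * the guest designation, re-enable the device for host use and remove it
 * from the KVM's device list.
 */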
static void kvm_s390_pci_unregister_kvm(void *opaque)
{
        struct zpci_dev *zdev = opaque;
        struct kvm *kvm;

        if (!zdev)
                return;

        mutex_lock(&zdev->kzdev_lock);

        if (WARN_ON(!zdev->kzdev)) {
                mutex_unlock(&zdev->kzdev_lock);
                return;
        }

        kvm = zdev->kzdev->kvm;
        mutex_lock(&kvm->lock);

        /*
         * A 0 gisa means interpretation was never enabled, just remove the
         * device from the list.
         */
        if (zdev->gisa == 0)
                goto out;

        /* Forwarding must be turned off before interpretation */
        if (zdev->kzdev->fib.fmt0.aibv != 0)
                kvm_s390_pci_aif_disable(zdev, true);

        /* Remove the host CLP guest designation */
        zdev->gisa = 0;

        if (zdev_enabled(zdev)) {
                if (zpci_disable_device(zdev))
                        goto out;
        }

        if (zpci_enable_device(zdev))
                goto out;

        /* Re-register the IOMMU that was already created */
        zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                           virt_to_phys(zdev->dma_table));

out:
        spin_lock(&kvm->arch.kzdev_list_lock);
        list_del(&zdev->kzdev->entry);
        spin_unlock(&kvm->arch.kzdev_list_lock);
        kvm_s390_pci_dev_release(zdev);

        mutex_unlock(&kvm->lock);
        mutex_unlock(&zdev->kzdev_lock);

        kvm_put_kvm(kvm);
}
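
/* Initialize the per-KVM list of passthrough zPCI devices */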
void kvm_s390_pci_init_list(struct kvm *kvm)
{
        spin_lock_init(&kvm->arch.kzdev_list_lock);
        INIT_LIST_HEAD(&kvm->arch.kzdev_list);
}

void kvm_s390_pci_clear_list(struct kvm *kvm)
{
        /*
         * This list should already be empty, either via vfio device closures
         * or kvm fd cleanup.
         */
        spin_lock(&kvm->arch.kzdev_list_lock);
        WARN_ON_ONCE(!list_empty(&kvm->arch.kzdev_list));
        spin_unlock(&kvm->arch.kzdev_list_lock);
}
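
/* Find a device on the KVM's passthrough list by its function handle */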
static struct zpci_dev *get_zdev_from_kvm_by_fh(struct kvm *kvm, u32 fh)
{
        struct zpci_dev *zdev = NULL;
        struct kvm_zdev *kzdev;

        spin_lock(&kvm->arch.kzdev_list_lock);
        list_for_each_entry(kzdev, &kvm->arch.kzdev_list, entry) {
                if (kzdev->zdev->fh == fh) {
                        zdev = kzdev->zdev;
                        break;
                }
        }
        spin_unlock(&kvm->arch.kzdev_list_lock);

        return zdev;
}
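
/* Build a format-0 FIB from the ioctl arguments and enable AEN forwarding */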
static int kvm_s390_pci_zpci_reg_aen(struct zpci_dev *zdev,
                                     struct kvm_s390_zpci_op *args)
{
        struct zpci_fib fib = {};
        bool hostflag;

        fib.fmt0.aibv = args->u.reg_aen.ibv;
        fib.fmt0.isc = args->u.reg_aen.isc;
        fib.fmt0.noi = args->u.reg_aen.noi;
        if (args->u.reg_aen.sb != 0) {
                fib.fmt0.aisb = args->u.reg_aen.sb;
                fib.fmt0.aisbo = args->u.reg_aen.sbo;
                fib.fmt0.sum = 1;
        } else {
                fib.fmt0.aisb = 0;
                fib.fmt0.aisbo = 0;
                fib.fmt0.sum = 0;
        }

        hostflag = !(args->u.reg_aen.flags & KVM_S390_ZPCIOP_REGAEN_HOST);
        return kvm_s390_pci_aif_enable(zdev, &fib, hostflag);
}
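
/*
 * Handle the KVM_S390_ZPCI_OP ioctl: after verifying that the function
 * handle belongs to this KVM, register or deregister adapter event
 * notification forwarding for the device.
 */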
int kvm_s390_pci_zpci_op(struct kvm *kvm, struct kvm_s390_zpci_op *args)
{
        struct kvm_zdev *kzdev;
        struct zpci_dev *zdev;
        int r;

        zdev = get_zdev_from_kvm_by_fh(kvm, args->fh);
        if (!zdev)
                return -ENODEV;

        mutex_lock(&zdev->kzdev_lock);
        mutex_lock(&kvm->lock);

        kzdev = zdev->kzdev;
        if (!kzdev) {
                r = -ENODEV;
                goto out;
        }
        if (kzdev->kvm != kvm) {
                r = -EPERM;
                goto out;
        }

        switch (args->op) {
        case KVM_S390_ZPCIOP_REG_AEN:
                /* Fail on unknown flags */
                if (args->u.reg_aen.flags & ~KVM_S390_ZPCIOP_REGAEN_HOST) {
                        r = -EINVAL;
                        break;
                }
                r = kvm_s390_pci_zpci_reg_aen(zdev, args);
                break;
        case KVM_S390_ZPCIOP_DEREG_AEN:
                r = kvm_s390_pci_aif_disable(zdev, false);
                break;
        default:
                r = -EINVAL;
        }

out:
        mutex_unlock(&kvm->lock);
        mutex_unlock(&zdev->kzdev_lock);
        return r;
}
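
/* Install the zPCI KVM hooks and, if interpretation is supported, the AIFT */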
int kvm_s390_pci_init(void)
{
        zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
        zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;

        if (!kvm_s390_pci_interp_allowed())
                return 0;

        aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
        if (!aift)
                return -ENOMEM;

        spin_lock_init(&aift->gait_lock);
        mutex_init(&aift->aift_lock);

        return 0;
}
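
/* Remove the zPCI KVM hooks and free the AIFT */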
void kvm_s390_pci_exit(void)
{
        zpci_kvm_hook.kvm_register = NULL;
        zpci_kvm_hook.kvm_unregister = NULL;

        if (!kvm_s390_pci_interp_allowed())
                return;

        mutex_destroy(&aift->aift_lock);
        kfree(aift);
}