vfio_ap_ops.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <[email protected]>
 *	      Halil Pasic <[email protected]>
 *	      Pierre Morel <[email protected]>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
 *				  KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	  required to access data stored in matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
 *	 will not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the bit is clear.
 * Returns if the ap_tapq function failed with invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and in case of success, reset
 * in progress or IRQ disable command already processed: calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function failed with invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default means AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * vfio_pin_pages failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic(),
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_GISA;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = (uint64_t)gisa >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		kvm_s390_gisc_unregister(kvm, isc);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string is available as
 * long as the debug feature exists. Since a mediated device can be removed at
 * any time, its name cannot be used because %s passes the reference to the
 * string in memory and the reference will go stale once the device is removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}

/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to following Response Code:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0) : in case of success
 *
 * Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}

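/*
 * vfio_ap_matrix_init - initialize the maximum adapter, domain and control
 * domain numbers of an ap_matrix from the host's AP configuration info: if
 * the extended addressing facility (apxa) is installed, the maximums
 * reported by the host (Na/Nd) are used; otherwise the defaults of 63
 * adapters and 15 (control) domains apply.
 */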
static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->Na : 63;
	matrix->aqm_max = info->apxa ? info->Nd : 15;
	matrix->adm_max = info->apxa ? info->Nd : 15;
}

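/*
 * vfio_ap_mdev_update_guest_apcb - if the mdev is attached to a KVM guest,
 * push the shadow APCB masks (apm/aqm/adm) into the guest's CRYCB via
 * kvm_arch_crypto_set_masks(); a no-op when no guest is attached.
 */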
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

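/*
 * vfio_ap_mdev_filter_cdoms - filter the control domains assigned to the
 * matrix mdev against those configured for the host, storing the result in
 * the shadow APCB. Returns true if the filtered mask differs from the
 * previous shadow ADM, i.e. the guest's APCB needs to be updated.
 */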
static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @apm: mask indicating the APIDs of the APQNs to be filtered.
 * @aqm: mask indicating the APQIs of the APQNs to be filtered.
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so the entire APID
 *	 is filtered.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
				       struct ap_matrix_mdev *matrix_mdev)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
	struct vfio_ap_queue *q;

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
			if (!q || q->reset_rc) {
				clear_bit_inv(apid,
					      matrix_mdev->shadow_apcb.apm);
				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}

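/*
 * vfio_ap_mdev_init_dev - vfio_device initialization callback: link the
 * vfio_device back to its mdev, initialize the mdev's matrix and shadow
 * APCB from the host's AP configuration, set up the PQAP(AQIC) hook and
 * initialize the hash table of queues assigned to the mdev.
 */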
static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

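/*
 * vfio_ap_mdev_probe - mdev driver probe callback: allocate the
 * ap_matrix_mdev, register it as an emulated IOMMU device and add it to the
 * driver's list of mediated devices under the mdevs_lock.
 */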
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}

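/*
 * vfio_ap_mdev_link_queue - link a vfio_ap_queue to a matrix mdev by setting
 * the queue's back-pointer and adding it to the mdev's queue hash table,
 * keyed by APQN.
 */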
static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (q) {
		q->matrix_mdev = matrix_mdev;
		hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
	}
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

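/*
 * The next three helpers sever the mdev<->queue linkage:
 * vfio_ap_unlink_queue_fr_mdev removes a queue from the mdev's hash table,
 * vfio_ap_unlink_mdev_fr_queue clears a queue's back-pointer to the mdev,
 * and vfio_ap_mdev_unlink_fr_queues does the latter for every queue assigned
 * to the mdev.
 */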
static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_release_dev(struct vfio_device *vdev)
{
	vfio_free_device(vdev);
}

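/*
 * vfio_ap_mdev_remove - mdev driver remove callback: unregister the device,
 * then, with both update locks held, reset and unlink the queues assigned to
 * the mdev and remove it from the driver's list of mediated devices.
 */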
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

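/*
 * vfio_ap_mdev_link_adapter - link all queues with APQNs derived from the
 * Cartesian product of @apid and the APQIs already assigned to the mdev.
 */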
static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_adapter_store - parses the APID from @buf and sets the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_adapter attribute
 * @buf:	a buffer containing the AP adapter number (APID) to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_delta, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);
	memset(apm_delta, 0, sizeof(apm_delta));
	set_bit_inv(apid, apm_delta);

	if (vfio_ap_mdev_filter_matrix(apm_delta,
				       matrix_mdev->matrix.aqm, matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);

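/*
 * vfio_ap_unlink_apqn_fr_mdev - unlink the queue identified by @apid and
 * @apqi from the matrix mdev, returning a pointer to the queue if it was
 * linked, or NULL otherwise.
 */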
static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
 *				 adapter from the matrix mdev to which the
 *				 adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qtable: table for storing queues associated with unassigned adapter.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct ap_queue_table *qtable)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qtable) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				hash_add(qtable->queues, &q->mdev_qnode,
					 q->apqn);
		}
	}
}

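/*
 * vfio_ap_mdev_hot_unplug_adapter - remove an adapter from a running guest:
 * unlink the adapter's queues, clear its APID from the shadow APCB (updating
 * the guest's APCB if the bit was set), then reset and unlink the queues
 * that had been passed through to the guest.
 */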
static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	int loop_cursor;
	struct vfio_ap_queue *q;
	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);

	hash_init(qtable->queues);
	vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);

	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_queues(qtable);

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		hash_del(&q->mdev_qnode);
	}

	kfree(qtable);
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_adapter attribute
 * @buf:	a buffer containing the adapter number (APID) to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

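/*
 * vfio_ap_mdev_link_domain - link all queues with APQNs derived from the
 * Cartesian product of the APIDs already assigned to the mdev and @apqi.
 */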
static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise returns
 * one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);
	memset(aqm_delta, 0, sizeof(aqm_delta));
	set_bit_inv(apqi, aqm_delta);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
				       matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

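/*
 * vfio_ap_mdev_unlink_domain - unlink all queues associated with @apqi from
 * the matrix mdev; queues that had been passed through to the guest (i.e.,
 * whose APID and APQI are both set in the shadow APCB) are collected in
 * @qtable for a subsequent reset.
 */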
static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct ap_queue_table *qtable)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qtable) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				hash_add(qtable->queues, &q->mdev_qnode,
					 q->apqn);
		}
	}
}

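/*
 * vfio_ap_mdev_hot_unplug_domain - remove a usage domain from a running
 * guest; the domain analogue of vfio_ap_mdev_hot_unplug_adapter() above.
 */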
static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	int loop_cursor;
	struct vfio_ap_queue *q;
	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);

	hash_init(qtable->queues);
	vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);

	if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
		clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_queues(qtable);

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		hash_del(&q->mdev_qnode);
	}

	kfree(qtable);
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to the one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 * clears the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

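/*
 * control_domains_show - sysfs read handler that displays, one ID per line,
 * the control domains assigned to the matrix mdev.
 */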
static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

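/*
 * vfio_ap_mdev_matrix_show - format an ap_matrix into @buf, one APQN
 * ("xx.yyyy") per line; if only adapters or only domains are assigned,
 * "xx." or ".yyyy" lines are printed instead.
 */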

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);
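
/*
 * Example (illustrative only): given the format strings in
 * vfio_ap_mdev_matrix_show(), a matrix with adapter 0x05 and domains
 * 0x04 and 0x47 assigned reads back as one APQN per line:
 *
 *	$ cat /sys/devices/vfio_ap/matrix/$UUID/matrix
 *	05.0004
 *	05.0047
 *
 * With only adapters assigned the lines look like "05."; with only
 * domains, ".0047". The guest_matrix attribute uses the same format but
 * shows the shadow APCB actually plugged into the guest, which may be a
 * filtered subset of the assigned matrix.
 */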

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 * to manage AP resources for the guest whose state is represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 * by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
				    unsigned int retry)
{
	struct ap_queue_status status;
	int ret;
	int retry2 = 2;

	if (!q)
		return 0;

retry_zapq:
	status = ap_zapq(q->apqn);
	q->reset_rc = status.response_code;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		ret = 0;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		if (retry--) {
			msleep(20);
			goto retry_zapq;
		}
		ret = -EBUSY;
		break;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		WARN_ONCE(status.irq_enabled,
			  "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
			  AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
			  status.response_code);
		ret = -EBUSY;
		goto free_resources;
	default:
		/* things are really broken, give up */
		WARN(true,
		     "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
		     AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
		     status.response_code);
		return -EIO;
	}

	/* wait for the reset to take effect */
	while (retry2--) {
		if (status.queue_empty && !status.irq_enabled)
			break;
		msleep(20);
		status = ap_tapq(q->apqn, NULL);
	}

	WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
		  AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));

free_resources:
	vfio_ap_free_aqic_resources(q);

	return ret;
}
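
/*
 * Worked example of the retry semantics above (illustrative only): a
 * caller passing retry = 1 gets at most two PQAP/ZAPQ attempts, 20ms
 * apart, while the queue reports AP_RESPONSE_RESET_IN_PROGRESS; the
 * verification loop then polls PQAP/TAPQ at most twice, again 20ms apart,
 * for the queue to report empty with interrupts disabled before warning
 * that the reset could not be verified.
 */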

static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
	int ret, loop_cursor, rc = 0;
	struct vfio_ap_queue *q;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		ret = vfio_ap_mdev_reset_queue(q, 1);
		/*
		 * Regardless of whether a queue turns out to be busy, or
		 * is not operational, we need to continue resetting the
		 * remaining queues.
		 */
		if (ret)
			rc = ret;
	}

	return rc;
}

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	if (!vdev->kvm)
		return -EINVAL;

	return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}

static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_ap_mdev_unset_kvm(matrix_mdev);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = 0;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;

	mutex_lock(&matrix_dev->mdevs_lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return ret;
}
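
/*
 * Illustrative userspace sketch (not part of the driver): the two ioctls
 * handled above are reached through the VFIO device fd, assumed here to
 * have been obtained via VFIO_GROUP_GET_DEVICE_FD:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_RESET))
 *		ioctl(device_fd, VFIO_DEVICE_RESET);
 *
 * VFIO_DEVICE_RESET resets all of the mdev's queues via
 * vfio_ap_mdev_reset_queues().
 */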

static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
	struct ap_matrix_mdev *matrix_mdev;
	unsigned long apid = AP_QID_CARD(q->apqn);
	unsigned long apqi = AP_QID_QUEUE(q->apqn);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
			return matrix_mdev;
	}

	return NULL;
}

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars = 0;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;
	struct ap_device *apdev = to_ap_dev(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	q = dev_get_drvdata(&apdev->device);
	matrix_mdev = vfio_ap_mdev_for_queue(q);

	if (matrix_mdev) {
		if (matrix_mdev->kvm)
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_IN_USE);
		else
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_ASSIGNED);
	} else {
		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
				   AP_QUEUE_UNASSIGNED);
	}

	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(status);
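
/*
 * Example (illustrative only): the status attribute is read per AP queue
 * device, e.g. for the queue with APQN 05.0047:
 *
 *	$ cat /sys/bus/ap/devices/05.0047/status
 *	assigned
 *
 * where "unassigned", "assigned" and "in_use" are assumed to be the
 * values of the AP_QUEUE_UNASSIGNED, AP_QUEUE_ASSIGNED and
 * AP_QUEUE_IN_USE strings defined in vfio_ap_private.h.
 */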

static struct attribute *vfio_queue_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group vfio_queue_attr_group = {
	.attrs = vfio_queue_attrs,
};

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.init = vfio_ap_mdev_init_dev,
	.release = vfio_ap_mdev_release_dev,
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
	.dma_unmap = vfio_ap_mdev_dma_unmap,
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.device_api = VFIO_DEVICE_API_AP_STRING,
	.max_instances = MAX_ZDEV_ENTRIES_EXT,
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
	matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
	matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
	ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
				   &vfio_ap_matrix_driver,
				   matrix_dev->mdev_types, 1);
	if (ret)
		goto err_driver;

	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_parent(&matrix_dev->parent);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}

int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
	int ret;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
	if (ret)
		return ret;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		ret = -ENOMEM;
		goto err_remove_group;
	}

	q->apqn = to_ap_queue(&apdev->device)->qid;
	q->saved_isc = VFIO_AP_ISC_INVALID;
	matrix_mdev = get_update_locks_by_apqn(q->apqn);

	if (matrix_mdev) {
		vfio_ap_mdev_link_queue(matrix_mdev, q);

		if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm,
					       matrix_mdev))
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}
	dev_set_drvdata(&apdev->device, q);
	release_update_locks_for_mdev(matrix_mdev);

	return 0;

err_remove_group:
	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	return ret;
}
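
/*
 * Illustrative note (a sketch of the surrounding workflow): the probe
 * callback above runs when a queue device binds to the vfio_ap driver,
 * which is normally arranged by removing its adapter from the host
 * drivers' mask, e.g. to hand all of adapter 0x05's queues to vfio_ap:
 *
 *	echo -0x05 > /sys/bus/ap/apmask
 *
 * Each of those queues then reprobes, lands in
 * vfio_ap_mdev_probe_queue(), and is linked to any matrix mdev to which
 * its APQN is assigned.
 */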

void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	q = dev_get_drvdata(&apdev->device);
	get_update_locks_for_queue(q);
	matrix_mdev = q->matrix_mdev;

	if (matrix_mdev) {
		vfio_ap_unlink_queue_fr_mdev(q);

		apid = AP_QID_CARD(q->apqn);
		apqi = AP_QID_QUEUE(q->apqn);

		/*
		 * If the queue is assigned to the guest's APCB, then remove
		 * the adapter's APID from the APCB and hot plug the updated
		 * APCB into the guest.
		 */
		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		}
	}

	vfio_ap_mdev_reset_queue(q, 1);
	dev_set_drvdata(&apdev->device, NULL);
	kfree(q);
	release_update_locks_for_mdev(matrix_mdev);
}

/**
 * vfio_ap_mdev_resource_in_use - check whether any of a set of APQNs is
 *				  assigned to a mediated device under the
 *				  control of the vfio_ap device driver.
 *
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
 *
 * Return:
 *	* -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
 *	  assigned to a mediated device under the control of the vfio_ap
 *	  device driver.
 *	* Otherwise, return 0.
 */
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
	int ret;

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);

	return ret;
}

/**
 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
 *				 domains that have been removed from the host's
 *				 AP configuration from a guest.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 * @aprem: the adapters that have been removed from the host's AP configuration
 * @aqrem: the domains that have been removed from the host's AP configuration
 * @cdrem: the control domains that have been removed from the host's AP
 *	   configuration.
 */
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
					unsigned long *aprem,
					unsigned long *aqrem,
					unsigned long *cdrem)
{
	int do_hotplug = 0;

	if (!bitmap_empty(aprem, AP_DEVICES)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
					    matrix_mdev->shadow_apcb.apm,
					    aprem, AP_DEVICES);
	}

	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
					    matrix_mdev->shadow_apcb.aqm,
					    aqrem, AP_DOMAINS);
	}

	if (!bitmap_empty(cdrem, AP_DOMAINS))
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
					    matrix_mdev->shadow_apcb.adm,
					    cdrem, AP_DOMAINS);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
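
/*
 * Worked example (illustrative only): if the guest's shadow APCB has
 * adapters {2, 3} plugged in and adapter 3 is removed from the host's
 * configuration (@aprem = {3}), the first bitmap_andnot() above rewrites
 * the shadow APM to {2} and returns nonzero because the result is
 * non-empty, so the updated APCB is hot plugged into the guest with
 * adapter 3 gone.
 */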

/**
 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
 *			     domains and control domains that have been removed
 *			     from the host AP configuration and unplugs them
 *			     from those guests.
 *
 * @ap_remove: bitmap specifying which adapters have been removed from the host
 *	       config.
 * @aq_remove: bitmap specifying which domains have been removed from the host
 *	       config.
 * @cd_remove: bitmap specifying which control domains have been removed from
 *	       the host config.
 */
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
				    unsigned long *aq_remove,
				    unsigned long *cd_remove)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);
	int do_remove = 0;

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		mutex_lock(&matrix_mdev->kvm->lock);
		mutex_lock(&matrix_dev->mdevs_lock);

		do_remove |= bitmap_and(aprem, ap_remove,
					matrix_mdev->matrix.apm,
					AP_DEVICES);
		do_remove |= bitmap_and(aqrem, aq_remove,
					matrix_mdev->matrix.aqm,
					AP_DOMAINS);
		do_remove |= bitmap_and(cdrem, cd_remove,
					matrix_mdev->matrix.adm,
					AP_DOMAINS);

		if (do_remove)
			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
						    cdrem);

		mutex_unlock(&matrix_dev->mdevs_lock);
		mutex_unlock(&matrix_mdev->kvm->lock);
	}
}

/**
 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
 *				control domains from the host AP configuration
 *				by unplugging them from the guests that are
 *				using them.
 * @cur_config_info: the current host AP configuration information
 * @prev_config_info: the previous host AP configuration information
 */
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
				       struct ap_config_info *prev_config_info)
{
	int do_remove;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);

	do_remove = bitmap_andnot(aprem,
				  (unsigned long *)prev_config_info->apm,
				  (unsigned long *)cur_config_info->apm,
				  AP_DEVICES);
	do_remove |= bitmap_andnot(aqrem,
				   (unsigned long *)prev_config_info->aqm,
				   (unsigned long *)cur_config_info->aqm,
				   AP_DOMAINS);
	do_remove |= bitmap_andnot(cdrem,
				   (unsigned long *)prev_config_info->adm,
				   (unsigned long *)cur_config_info->adm,
				   AP_DOMAINS);

	if (do_remove)
		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}
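
/*
 * Example (illustrative only) of the removed-set computation above:
 * bitmap_andnot(dst, prev, cur, n) computes prev & ~cur, so an adapter
 * whose APID bit is set in the previous configuration but clear in the
 * current one, e.g. adapter 0x05 after it is deconfigured, ends up set
 * in aprem and is unplugged from every guest using it.
 */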

/**
 * vfio_ap_filter_apid_by_qtype - filter APIDs from an AP mask for adapters that
 *				  are older than AP type 10 (CEX4).
 * @apm: a bitmap of the APIDs to examine
 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
 */
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
	bool apid_cleared;
	struct ap_queue_status status;
	unsigned long apid, apqi, info;
	int qtype, qtype_mask = 0xff000000;

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		apid_cleared = false;

		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
			switch (status.response_code) {
			/*
			 * According to the architecture in each case
			 * below, the queue's info should be filled.
			 */
			case AP_RESPONSE_NORMAL:
			case AP_RESPONSE_RESET_IN_PROGRESS:
			case AP_RESPONSE_DECONFIGURED:
			case AP_RESPONSE_CHECKSTOPPED:
			case AP_RESPONSE_BUSY:
				qtype = info & qtype_mask;

				/*
				 * The vfio_ap device driver only
				 * supports CEX4 and newer adapters, so
				 * remove the APID if the adapter is
				 * older than a CEX4.
				 */
				if (qtype < AP_DEVICE_TYPE_CEX4) {
					clear_bit_inv(apid, apm);
					apid_cleared = true;
				}
				break;
			default:
				/*
				 * If we don't know the adapter type,
				 * clear its APID since it can't be
				 * determined whether the vfio_ap
				 * device driver supports it.
				 */
				clear_bit_inv(apid, apm);
				apid_cleared = true;
				break;
			}

			/*
			 * If we've already cleared the APID from the apm,
			 * there is no need to continue examining the
			 * remaining AP queues to determine the type of the
			 * adapter.
			 */
			if (apid_cleared)
				break;
		}
	}
}
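
/*
 * Example (illustrative only): if adapter 0x05 is a CEX3 and is assigned
 * to an mdev alongside CEX5 adapters, the filter above clears APID 0x05
 * from @apm because the driver only supports CEX4 and newer; an APID
 * whose type cannot be determined at all is likewise cleared.
 */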

/**
 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
 *			  control domains that have been added to the host's
 *			  AP configuration for each matrix mdev to which they
 *			  are assigned.
 *
 * @apm_add: a bitmap specifying the adapters that have been added to the AP
 *	     configuration.
 * @aqm_add: a bitmap specifying the domains that have been added to the AP
 *	     configuration.
 * @adm_add: a bitmap specifying the control domains that have been added to the
 *	     AP configuration.
 */
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
				 unsigned long *adm_add)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (list_empty(&matrix_dev->mdev_list))
		return;

	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		bitmap_and(matrix_mdev->apm_add,
			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
		bitmap_and(matrix_mdev->aqm_add,
			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
		bitmap_and(matrix_mdev->adm_add,
			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
	}
}

/**
 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
 *			     control domains to the host AP configuration
 *			     by updating the bitmaps that specify what adapters,
 *			     domains and control domains have been added so they
 *			     can be hot plugged into the guest when the AP bus
 *			     scan completes (see vfio_ap_on_scan_complete()).
 * @cur_config_info: the current AP configuration information
 * @prev_config_info: the previous AP configuration information
 */
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
				    struct ap_config_info *prev_config_info)
{
	bool do_add;
	DECLARE_BITMAP(apm_add, AP_DEVICES);
	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
	DECLARE_BITMAP(adm_add, AP_DOMAINS);

	do_add = bitmap_andnot(apm_add,
			       (unsigned long *)cur_config_info->apm,
			       (unsigned long *)prev_config_info->apm,
			       AP_DEVICES);
	do_add |= bitmap_andnot(aqm_add,
				(unsigned long *)cur_config_info->aqm,
				(unsigned long *)prev_config_info->aqm,
				AP_DOMAINS);
	do_add |= bitmap_andnot(adm_add,
				(unsigned long *)cur_config_info->adm,
				(unsigned long *)prev_config_info->adm,
				AP_DOMAINS);

	if (do_add)
		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}

/**
 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
 *			    configuration.
 *
 * @cur_cfg_info: the current host AP configuration
 * @prev_cfg_info: the previous host AP configuration
 */
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
			    struct ap_config_info *prev_cfg_info)
{
	if (!cur_cfg_info || !prev_cfg_info)
		return;

	mutex_lock(&matrix_dev->guests_lock);

	vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
	vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
	memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));

	mutex_unlock(&matrix_dev->guests_lock);
}

static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
{
	bool do_hotplug = false;
	int filter_domains = 0;
	int filter_adapters = 0;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);

	filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
				     matrix_mdev->apm_add, AP_DEVICES);
	filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
				    matrix_mdev->aqm_add, AP_DOMAINS);

	if (filter_adapters && filter_domains)
		do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
	else if (filter_adapters)
		do_hotplug |=
			vfio_ap_mdev_filter_matrix(apm,
						   matrix_mdev->shadow_apcb.aqm,
						   matrix_mdev);
	else
		do_hotplug |=
			vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
						   aqm, matrix_mdev);

	if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
			      AP_DOMAINS))
		do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_mdev->kvm->lock);
}

void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
			      struct ap_config_info *old_config_info)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
		    bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
		    bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
			continue;

		vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
		bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
		bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
		bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
	}

	mutex_unlock(&matrix_dev->guests_lock);
}