virtio_pci_common.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Rusty Russell <[email protected]>
 *  Michael S. Tsirkin <[email protected]>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change? Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}
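/*
 * Allocate up to @nvectors MSI-X vectors and register the vector used for
 * configuration change interrupts. Depending on @per_vq_vectors, the
 * remaining vectors are either dedicated to individual virtqueues or a
 * single vector is set up to be shared by all of them.
 */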
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors, struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned int flags = PCI_IRQ_MSIX;
        unsigned int i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_names = kmalloc_array(nvectors,
                                           sizeof(*vp_dev->msix_names),
                                           GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                       GFP_KERNEL))
                        goto error;

        if (desc) {
                flags |= PCI_IRQ_AFFINITY;
                desc->pre_vectors++; /* virtio config vector */
        }

        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
                                             nvectors, flags, desc);
        if (err < 0)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        v = vp_dev->config_vector(vp_dev, v);
        /* Verify we had enough resources to assign the vector */
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        return err;
}
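/*
 * Allocate the virtio_pci_vq_info bookkeeping for one virtqueue, create the
 * queue through the version-specific setup_vq() hook and, if it has a
 * callback, add it to the list scanned by the shared interrupt handlers.
 */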
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     bool ctx,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
                              msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}
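/*
 * Delete a single virtqueue: unlink it from the interrupt list (unless it
 * sits in the reset state and was already unlinked) and release it via the
 * version-specific del_vq() hook.
 */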
static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        /*
         * If the vq is still in the reset state (e.g. re-enabling it failed),
         * info->node was never rejoined to the virtqueue list, so don't
         * unlink it here. This prevents unexpected irqs.
         */
        if (!vq->reset) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_del(&info->node);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        }

        vp_dev->del_vq(info);
        kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        int i;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                if (vp_dev->per_vq_vectors) {
                        int v = vp_dev->vqs[vq->index]->msix_vector;

                        if (v != VIRTIO_MSI_NO_VECTOR) {
                                int irq = pci_irq_vector(vp_dev->pci_dev, v);

                                irq_set_affinity_hint(irq, NULL);
                                free_irq(irq, vq);
                        }
                }
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

        if (vp_dev->msix_affinity_masks) {
                for (i = 0; i < vp_dev->msix_vectors; i++)
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);
        }

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

                pci_free_irq_vectors(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}
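/*
 * Find virtqueues using MSI-X: one vector is always used for configuration
 * changes, while the virtqueues either get one dedicated vector each
 * (per_vq_vectors) or share a single vector.
 */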
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], bool per_vq_vectors,
                const bool *ctx,
                struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (per_vq_vectors) {
                /* Best option: one for change interrupt, one per vq. */
                nvectors = 1;
                for (i = 0; i < nvqs; ++i)
                        if (names[i] && callbacks[i])
                                ++nvectors;
        } else {
                /* Second best: one for change, shared for all vqs. */
                nvectors = 2;
        }

        err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
                                      per_vq_vectors ? desc : NULL);
        if (err)
                goto error_find;

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
                        goto error_find;
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}
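/*
 * Last resort when MSI-X cannot be used: service configuration changes and
 * all virtqueues from one shared INTx interrupt.
 */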
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                          dev_name(&vdev->dev), vp_dev);
        if (err)
                goto out_del_vqs;

        vp_dev->intx_enabled = 1;
        vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto out_del_vqs;
                }
        }

        return 0;
out_del_vqs:
        vp_del_vqs(vdev);
        return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx,
                struct irq_affinity *desc)
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
        if (!err)
                return 0;
        /* Is there an interrupt? If not give up. */
        if (!(to_vp_device(vdev)->pci_dev->irq))
                return err;
        /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
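/* the config->bus_name() implementation: report the underlying PCI device */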
const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (!cpu_mask)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_copy(mask, cpu_mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}
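/*
 * Report the effective CPU affinity of a virtqueue's MSI-X vector; only
 * meaningful when per-vq vectors are in use.
 */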
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        if (!vp_dev->per_vq_vectors ||
            vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;

        return pci_irq_get_affinity(vp_dev->pci_dev,
                                    vp_dev->vqs[index]->msix_vector);
}
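/*
 * Power management: freeze/restore handle hibernation and are also used for
 * suspend/resume when the device does not preserve its state across D3
 * (see vp_supports_pm_no_reset() below).
 */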
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);

        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}
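/*
 * Check whether the function advertises No_Soft_Reset in its PMCSR, i.e.
 * whether device state is preserved across the D3hot -> D0 transition.
 * If so, suspend/resume can skip the full freeze/restore cycle.
 */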
static bool vp_supports_pm_no_reset(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        u16 pmcsr;

        if (!pci_dev->pm_cap)
                return false;

        pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                dev_err(dev, "Unable to query pmcsr");
                return false;
        }

        return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}

static int virtio_pci_suspend(struct device *dev)
{
        return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}

static int virtio_pci_resume(struct device *dev)
{
        return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        .suspend = virtio_pci_suspend,
        .resume = virtio_pci_resume,
        .freeze = virtio_pci_freeze,
        .thaw = virtio_pci_restore,
        .poweroff = virtio_pci_freeze,
        .restore = virtio_pci_restore,
};
#endif
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
        struct virtio_device *vdev = dev_to_virtio(_d);
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* As struct device is a kobject, it's not safe to
         * free the memory (including the reference counter itself)
         * until its release callback. */
        kfree(vp_dev);
}
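/*
 * Probe: enable the PCI device, attach it as either a modern or a legacy
 * virtio transport (preferring modern unless force_legacy is set), then
 * register the resulting virtio device.
 */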
static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev, *reg_dev = NULL;
        int rc;

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        pci_set_drvdata(pci_dev, vp_dev);
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* enable the device */
        rc = pci_enable_device(pci_dev);
        if (rc)
                goto err_enable_device;

        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
                if (rc == -ENODEV || rc == -ENOMEM)
                        rc = virtio_pci_modern_probe(vp_dev);
                if (rc)
                        goto err_probe;
        } else {
                rc = virtio_pci_modern_probe(vp_dev);
                if (rc == -ENODEV)
                        rc = virtio_pci_legacy_probe(vp_dev);
                if (rc)
                        goto err_probe;
        }

        pci_set_master(pci_dev);

        vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;

        rc = register_virtio_device(&vp_dev->vdev);
        reg_dev = vp_dev;
        if (rc)
                goto err_register;

        return 0;

err_register:
        if (vp_dev->is_legacy)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);
err_probe:
        pci_disable_device(pci_dev);
err_enable_device:
        if (reg_dev)
                put_device(&vp_dev->vdev.dev);
        else
                kfree(vp_dev);
        return rc;
}
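/*
 * Remove: unregister the virtio device and release the transport. A device
 * that disappeared (surprise removal) is marked broken first so that upper
 * layers abort any outstanding operation.
 */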
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);

        /*
         * Device is marked broken on surprise removal so that virtio upper
         * layers can abort any ongoing operation.
         */
        if (!pci_device_is_present(pci_dev))
                virtio_break_device(&vp_dev->vdev);

        pci_disable_sriov(pci_dev);

        unregister_virtio_device(&vp_dev->vdev);

        if (vp_dev->is_legacy)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);

        pci_disable_device(pci_dev);
        put_device(dev);
}
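/*
 * sriov_configure: enable or disable virtual functions. Only permitted once
 * the device is live (DRIVER_OK) with VIRTIO_F_SR_IOV negotiated, and never
 * while VFs are assigned to a guest.
 */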
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct virtio_device *vdev = &vp_dev->vdev;
        int ret;

        if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
                return -EBUSY;

        if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
                return -EINVAL;

        if (pci_vfs_assigned(pci_dev))
                return -EPERM;

        if (num_vfs == 0) {
                pci_disable_sriov(pci_dev);
                return 0;
        }

        ret = pci_enable_sriov(pci_dev, num_vfs);
        if (ret < 0)
                return ret;

        return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
        .sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <[email protected]>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");