virtio_pci_legacy.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - legacy device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "linux/virtio_pci_legacy.h"
#include "virtio_pci_common.h"

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* When someone needs more than 32 feature bits, we'll need to
         * steal a bit to indicate that the rest are somewhere else. */
        return vp_legacy_get_features(&vp_dev->ldev);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Make sure we don't have any features > 32 bits! */
        BUG_ON((u32)vdev->features != vdev->features);

        /* We only support 32 feature bits. */
        vp_legacy_set_features(&vp_dev->ldev, vdev->features);

        return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned int offset,
                   void *buf, unsigned int len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
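        /* The device-specific config window follows the MSI-X vector
         * registers, so its offset depends on whether MSI-X is enabled
         * (see VIRTIO_PCI_CONFIG_OFF). */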
        void __iomem *ioaddr = vp_dev->ldev.ioaddr +
                        VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
                        offset;
        u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
                   const void *buf, unsigned int len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ldev.ioaddr +
                        VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
                        offset;
        const u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                iowrite8(ptr[i], ioaddr + i);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return vp_legacy_get_status(&vp_dev->ldev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        vp_legacy_set_status(&vp_dev->ldev, status);
}

static void vp_reset(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* 0 status means a reset. */
        vp_legacy_set_status(&vp_dev->ldev, 0);
        /* Flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any. */
        vp_legacy_get_status(&vp_dev->ldev);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
        return vp_legacy_config_vector(&vp_dev->ldev, vector);
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
                                  unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  bool ctx,
                                  u16 msix_vec)
{
        struct virtqueue *vq;
        u16 num;
        int err;
        u64 q_pfn;

        /* Check if queue is either not available or already active. */
        num = vp_legacy_get_queue_size(&vp_dev->ldev, index);
        if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
                return ERR_PTR(-ENOENT);

        info->msix_vector = msix_vec;

        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
                                    true, false, ctx,
                                    vp_notify, callback, name);
        if (!vq)
                return ERR_PTR(-ENOMEM);

        vq->num_max = num;

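        /* The legacy interface exposes the queue address as a 32-bit page
         * frame number, so the ring must sit below that limit. */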
        q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (q_pfn >> 32) {
                dev_err(&vp_dev->pci_dev->dev,
                        "platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
                        0x1ULL << (32 + PAGE_SHIFT - 30));
                err = -E2BIG;
                goto out_del_vq;
        }

        /* activate the queue */
        vp_legacy_set_queue_address(&vp_dev->ldev, index, q_pfn);

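        /* vp_notify() kicks the device by writing the queue index to this
         * notification register. */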
        vq->priv = (void __force *)vp_dev->ldev.ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

        if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
                msix_vec = vp_legacy_queue_vector(&vp_dev->ldev, index, msix_vec);
                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
                        err = -EBUSY;
                        goto out_deactivate;
                }
        }

        return vq;

out_deactivate:
        vp_legacy_set_queue_address(&vp_dev->ldev, index, 0);
out_del_vq:
        vring_del_virtqueue(vq);
        return ERR_PTR(err);
}

static void del_vq(struct virtio_pci_vq_info *info)
{
        struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

        if (vp_dev->msix_enabled) {
                vp_legacy_queue_vector(&vp_dev->ldev, vq->index,
                                       VIRTIO_MSI_NO_VECTOR);
                /* Flush the write out to device */
                ioread8(vp_dev->ldev.ioaddr + VIRTIO_PCI_ISR);
        }

        /* Select and deactivate the queue */
        vp_legacy_set_queue_address(&vp_dev->ldev, vq->index, 0);

        vring_del_virtqueue(vq);
}

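/* The find_vqs/del_vqs, synchronize_cbs, bus_name and affinity helpers are
 * shared with the modern transport and are defined in virtio_pci_common.c. */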
static const struct virtio_config_ops virtio_pci_config_ops = {
        .get = vp_get,
        .set = vp_set,
        .get_status = vp_get_status,
        .set_status = vp_set_status,
        .reset = vp_reset,
        .find_vqs = vp_find_vqs,
        .del_vqs = vp_del_vqs,
        .synchronize_cbs = vp_synchronize_vectors,
        .get_features = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name = vp_bus_name,
        .set_vq_affinity = vp_set_vq_affinity,
        .get_vq_affinity = vp_get_vq_affinity,
};

/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
        struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;
        struct pci_dev *pci_dev = vp_dev->pci_dev;
        int rc;

        ldev->pci_dev = pci_dev;

        rc = vp_legacy_probe(ldev);
        if (rc)
                return rc;

        vp_dev->isr = ldev->isr;
        vp_dev->vdev.id = ldev->id;
        vp_dev->vdev.config = &virtio_pci_config_ops;

        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;

        return 0;
}

void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
        struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;

        vp_legacy_remove(ldev);
}