vpci.c

// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 * to the frontend
 *
 * Author: Ryan Wilson <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>

#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};
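
/* Return the first entry on @head; callers ensure the list is non-empty. */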
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}
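
/*
 * Look up the real device behind a virtual domain/bus/devfn. The
 * virtual bus only exposes domain 0, bus 0; the slot number indexes
 * dev_list[] and the function is matched against the real devfn.
 */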
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}
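
/*
 * Two devices belong in the same virtual slot iff they share the
 * same physical domain, bus and slot (the function may differ).
 */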
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}
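
/*
 * Place @dev on the virtual bus: reuse the slot of devices from the
 * same physical slot where possible, otherwise take the first free
 * slot, then report the chosen virtual devfn via @publish_cb.
 */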
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = PCI_FUNC(dev->devfn);
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}
	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * that we want to keep virtual functions at func 0 on their own. They
	 * aren't multi-function devices and hence their presence at func 0
	 * may cause guests to not scan the other functions.
	 */
	if (!dev->is_virtfn || func) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);
			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
				continue;

			if (match_slot(dev, t->dev)) {
				dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
					 slot, func);
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			dev_info(&dev->dev, "vpci: assign to virtual slot %d\n",
				 slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);

out:
	return err;
}
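
/*
 * Remove @dev from the virtual bus and return it to the stub driver;
 * @lock tells us whether to take the device lock around
 * pcistub_put_pci_dev().
 */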
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}
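
/* Allocate this backend's slot table and initialise its lock and lists. */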
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}
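
/* Report the roots of the virtual bus; there is exactly one, at 0000:00. */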
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
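
/*
 * Tear down the virtual bus: hand every device back to the stub
 * driver and free the slot table.
 */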
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			struct pci_dev *dev = e->dev;

			list_del(&e->list);
			device_lock(&dev->dev);
			pcistub_put_pci_dev(dev);
			device_unlock(&dev->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
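
/*
 * Reverse lookup: translate a real pci_dev into the virtual
 * domain/bus/devfn the frontend sees. Returns 1 if found, 0 otherwise.
 */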
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			if (entry->dev == pcidev) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}

	mutex_unlock(&vpci_dev->lock);
	return found;
}
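
/* Operations this backend registers with the xen-pciback core. */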
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};