  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2009, Intel Corporation.
  4. *
  5. * Author: Weidong Han <[email protected]>
  6. */
  7. #include <linux/pci.h>
  8. #include <linux/acpi.h>
  9. #include <linux/pci-acpi.h>
  10. #include <xen/pci.h>
  11. #include <xen/xen.h>
  12. #include <xen/interface/physdev.h>
  13. #include <xen/interface/xen.h>
  14. #include <asm/xen/hypervisor.h>
  15. #include <asm/xen/hypercall.h>
  16. #include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
/* Defined below; also invoked once from xen_add_device(). */
static int xen_mcfg_late(void);
#endif

/*
 * Whether the hypervisor understands the segment-aware
 * PHYSDEVOP_pci_device_add/remove interface.  Assumed true until a
 * hypercall returns -ENOSYS, after which the legacy (segment-less)
 * PHYSDEVOP_manage_pci_* interface is used instead.
 */
static bool __read_mostly pci_seg_supported = true;
/*
 * Report a newly discovered PCI device to the hypervisor.
 *
 * First tries the segment-aware PHYSDEVOP_pci_device_add interface,
 * optionally annotated with a NUMA proximity domain obtained by walking
 * ACPI _PXM up the namespace.  If that hypercall returns -ENOSYS, the
 * segment-aware interface is disabled for good and the legacy
 * PHYSDEVOP_manage_pci_add* fallback is used; the legacy interface
 * cannot express a nonzero PCI segment, so such devices get -ENOSYS.
 *
 * Returns 0 on success or a negative error code from the hypercall.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		/* Extend the hypercall argument with room for one PXM value. */
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;
#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		/* Virtual functions carry their parent PF's bus/devfn. */
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		/* VFs often have no ACPI node; fall back to the PF's bridge. */
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			/* Walk up the namespace until a _PXM is found. */
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		/* Hypervisor lacks the segment-aware op; never retry it. */
		pci_seg_supported = false;
	}

	/* Legacy fallback: no way to express a nonzero segment. */
	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
  134. static int xen_remove_device(struct device *dev)
  135. {
  136. int r;
  137. struct pci_dev *pci_dev = to_pci_dev(dev);
  138. if (pci_seg_supported) {
  139. struct physdev_pci_device device = {
  140. .seg = pci_domain_nr(pci_dev->bus),
  141. .bus = pci_dev->bus->number,
  142. .devfn = pci_dev->devfn
  143. };
  144. r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
  145. &device);
  146. } else if (pci_domain_nr(pci_dev->bus))
  147. r = -ENOSYS;
  148. else {
  149. struct physdev_manage_pci manage_pci = {
  150. .bus = pci_dev->bus->number,
  151. .devfn = pci_dev->devfn
  152. };
  153. r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
  154. &manage_pci);
  155. }
  156. return r;
  157. }
  158. static int xen_pci_notifier(struct notifier_block *nb,
  159. unsigned long action, void *data)
  160. {
  161. struct device *dev = data;
  162. int r = 0;
  163. switch (action) {
  164. case BUS_NOTIFY_ADD_DEVICE:
  165. r = xen_add_device(dev);
  166. break;
  167. case BUS_NOTIFY_DEL_DEVICE:
  168. r = xen_remove_device(dev);
  169. break;
  170. default:
  171. return NOTIFY_DONE;
  172. }
  173. if (r)
  174. dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
  175. action == BUS_NOTIFY_ADD_DEVICE ? "add" :
  176. (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
  177. return NOTIFY_OK;
  178. }
  179. static struct notifier_block device_nb = {
  180. .notifier_call = xen_pci_notifier,
  181. };
  182. static int __init register_xen_pci_notifier(void)
  183. {
  184. if (!xen_initial_domain())
  185. return 0;
  186. return bus_register_notifier(&pci_bus_type, &device_nb);
  187. }
  188. arch_initcall(register_xen_pci_notifier);
  189. #ifdef CONFIG_PCI_MMCONFIG
  190. static int xen_mcfg_late(void)
  191. {
  192. struct pci_mmcfg_region *cfg;
  193. int rc;
  194. if (!xen_initial_domain())
  195. return 0;
  196. if ((pci_probe & PCI_PROBE_MMCONF) == 0)
  197. return 0;
  198. if (list_empty(&pci_mmcfg_list))
  199. return 0;
  200. /* Check whether they are in the right area. */
  201. list_for_each_entry(cfg, &pci_mmcfg_list, list) {
  202. struct physdev_pci_mmcfg_reserved r;
  203. r.address = cfg->address;
  204. r.segment = cfg->segment;
  205. r.start_bus = cfg->start_bus;
  206. r.end_bus = cfg->end_bus;
  207. r.flags = XEN_PCI_MMCFG_RESERVED;
  208. rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
  209. switch (rc) {
  210. case 0:
  211. case -ENOSYS:
  212. continue;
  213. default:
  214. pr_warn("Failed to report MMCONFIG reservation"
  215. " state for %s to hypervisor"
  216. " (%d)\n",
  217. cfg->name, rc);
  218. }
  219. }
  220. return 0;
  221. }
  222. #endif
#ifdef CONFIG_XEN_DOM0
/*
 * Record of which domain currently owns a passed-through PCI device.
 * Entries are kept on dev_domain_list and every access goes through
 * dev_domain_list_spinlock.
 */
struct xen_device_domain_owner {
	domid_t domain;		/* owning domain id */
	struct pci_dev *dev;	/* device this entry describes */
	struct list_head list;	/* link on dev_domain_list */
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
  231. static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
  232. {
  233. struct xen_device_domain_owner *owner;
  234. list_for_each_entry(owner, &dev_domain_list, list) {
  235. if (owner->dev == dev)
  236. return owner;
  237. }
  238. return NULL;
  239. }
  240. int xen_find_device_domain_owner(struct pci_dev *dev)
  241. {
  242. struct xen_device_domain_owner *owner;
  243. int domain = -ENODEV;
  244. spin_lock(&dev_domain_list_spinlock);
  245. owner = find_device(dev);
  246. if (owner)
  247. domain = owner->domain;
  248. spin_unlock(&dev_domain_list_spinlock);
  249. return domain;
  250. }
  251. EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
  252. int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
  253. {
  254. struct xen_device_domain_owner *owner;
  255. owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
  256. if (!owner)
  257. return -ENODEV;
  258. spin_lock(&dev_domain_list_spinlock);
  259. if (find_device(dev)) {
  260. spin_unlock(&dev_domain_list_spinlock);
  261. kfree(owner);
  262. return -EEXIST;
  263. }
  264. owner->domain = domain;
  265. owner->dev = dev;
  266. list_add_tail(&owner->list, &dev_domain_list);
  267. spin_unlock(&dev_domain_list_spinlock);
  268. return 0;
  269. }
  270. EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
  271. int xen_unregister_device_domain_owner(struct pci_dev *dev)
  272. {
  273. struct xen_device_domain_owner *owner;
  274. spin_lock(&dev_domain_list_spinlock);
  275. owner = find_device(dev);
  276. if (!owner) {
  277. spin_unlock(&dev_domain_list_spinlock);
  278. return -ENODEV;
  279. }
  280. list_del(&owner->list);
  281. spin_unlock(&dev_domain_list_spinlock);
  282. kfree(owner);
  283. return 0;
  284. }
  285. EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
  286. #endif