  1. /*
  2. * PCI Stub Driver - Grabs devices in backend to be exported later
  3. *
  4. * Ryan Wilson <[email protected]>
  5. * Chris Bookholt <[email protected]>
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #define dev_fmt pr_fmt
  9. #include <linux/module.h>
  10. #include <linux/init.h>
  11. #include <linux/rwsem.h>
  12. #include <linux/list.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/kref.h>
  15. #include <linux/pci.h>
  16. #include <linux/wait.h>
  17. #include <linux/sched.h>
  18. #include <linux/atomic.h>
  19. #include <xen/events.h>
  20. #include <xen/pci.h>
  21. #include <xen/xen.h>
  22. #include <asm/xen/hypervisor.h>
  23. #include <xen/interface/physdev.h>
  24. #include "pciback.h"
  25. #include "conf_space.h"
  26. #include "conf_space_quirks.h"
#define PCISTUB_DRIVER_NAME "pciback"

/* "hide" module parameter: textual list of slots to seize at probe time. */
static char *pci_devs_to_hide;

/* Woken by the pcifront response path once an AER request has been acked
 * (see common_process()). */
wait_queue_head_t xen_pcibk_aer_wait_queue;

/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
 * We want to avoid in middle of AER ops, xen_pcibk devices is being removed
 */
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);

/* One slot specification (domain:bus:devfn) that this driver should grab. */
struct pcistub_device_id {
	struct list_head slot_list;	/* link in pcistub_device_ids */
	int domain;			/* PCI segment/domain number */
	unsigned char bus;
	unsigned int devfn;		/* encoded slot+function */
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

/* Refcounted wrapper around a seized pci_dev; lives on pcistub_devices
 * (or on seized_devices until late initialization has run). */
struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;
	spinlock_t lock;		/* protects ->pdev below */

	struct pci_dev *dev;
	struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);
  60. static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
  61. {
  62. struct pcistub_device *psdev;
  63. dev_dbg(&dev->dev, "pcistub_device_alloc\n");
  64. psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
  65. if (!psdev)
  66. return NULL;
  67. psdev->dev = pci_dev_get(dev);
  68. if (!psdev->dev) {
  69. kfree(psdev);
  70. return NULL;
  71. }
  72. kref_init(&psdev->kref);
  73. spin_lock_init(&psdev->lock);
  74. return psdev;
  75. }
/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the reset function which does not take lock as this
	 * is called from "unbind" which takes a device_lock mutex.
	 */
	__pci_reset_function_locked(dev);
	/* Restore the config space captured at bind time; fall back to
	 * whatever state is current if the snapshot cannot be reloaded. */
	if (dev_data &&
	    pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_info(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

	if (dev->msix_cap) {
		/* Undo the PHYSDEVOP_prepare_msix from pcistub_init_device. */
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		/* -ENOSYS only means the hypervisor lacks this op. */
		if (err && err != -ENOSYS)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	pci_clear_dev_assigned(dev);
	pci_dev_put(dev);

	kfree(psdev);
}
/* Take an additional reference on a stub device. */
static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}

/* Drop a reference; the final put runs pcistub_device_release(). */
static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}
  127. static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
  128. int slot, int func)
  129. {
  130. struct pcistub_device *psdev;
  131. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  132. if (psdev->dev != NULL
  133. && domain == pci_domain_nr(psdev->dev->bus)
  134. && bus == psdev->dev->bus->number
  135. && slot == PCI_SLOT(psdev->dev->devfn)
  136. && func == PCI_FUNC(psdev->dev->devfn)) {
  137. return psdev;
  138. }
  139. }
  140. return NULL;
  141. }
  142. static struct pcistub_device *pcistub_device_find(int domain, int bus,
  143. int slot, int func)
  144. {
  145. struct pcistub_device *psdev;
  146. unsigned long flags;
  147. spin_lock_irqsave(&pcistub_devices_lock, flags);
  148. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  149. if (psdev)
  150. pcistub_device_get(psdev);
  151. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  152. return psdev;
  153. }
/*
 * Try to claim @psdev for the pciback instance @pdev.  On success the
 * underlying pci_dev is returned and the stub reference taken here is
 * kept (released later via pcistub_put_pci_dev).  Returns NULL if the
 * device is already in use by some instance.
 */
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	/* Claim failed - give back the reference taken above. */
	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}
  170. struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
  171. int domain, int bus,
  172. int slot, int func)
  173. {
  174. struct pcistub_device *psdev;
  175. struct pci_dev *found_dev = NULL;
  176. unsigned long flags;
  177. spin_lock_irqsave(&pcistub_devices_lock, flags);
  178. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  179. if (psdev)
  180. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  181. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  182. return found_dev;
  183. }
  184. struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
  185. struct pci_dev *dev)
  186. {
  187. struct pcistub_device *psdev;
  188. struct pci_dev *found_dev = NULL;
  189. unsigned long flags;
  190. spin_lock_irqsave(&pcistub_devices_lock, flags);
  191. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  192. if (psdev->dev == dev) {
  193. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  194. break;
  195. }
  196. }
  197. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  198. return found_dev;
  199. }
/*
 * Called when:
 * - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device
 * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
 * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
 *
 * As such we have to be careful.
 *
 * To make this easier, the caller has to hold the device lock.
 */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;
	struct xen_pcibk_dev_data *dev_data;
	int ret;

	/* Find the stub entry that wraps this pci_dev. */
	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (WARN_ON(!found_psdev))
		return;

	/*hold this lock for avoiding breaking link between
	 * pcistub and xen_pcibk when AER is in processing
	 */
	down_write(&pcistub_sem);
	/* Cleanup our device
	 * (so it's ready for the next domain)
	 */
	device_lock_assert(&dev->dev);
	__pci_reset_function_locked(dev);

	dev_data = pci_get_drvdata(dev);
	ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
	if (!ret) {
		/*
		 * The usual sequence is pci_save_state & pci_restore_state
		 * but the guest might have messed the configuration space up.
		 * Use the initial version (when device was bound to us).
		 */
		pci_restore_state(dev);
	} else
		dev_info(&dev->dev, "Could not reload PCI state\n");
	/* This disables the device. */
	xen_pcibk_reset_device(dev);

	/* And cleanup up our emulated fields. */
	xen_pcibk_config_reset_dev(dev);
	xen_pcibk_config_free_dyn_fields(dev);

	dev_data->allow_interrupt_control = 0;

	xen_unregister_device_domain_owner(dev);

	/* Sever the link to the pciback instance so the device can be
	 * claimed again. */
	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	/* Drop the reference taken when the device was claimed. */
	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}
  260. static int pcistub_match_one(struct pci_dev *dev,
  261. struct pcistub_device_id *pdev_id)
  262. {
  263. /* Match the specified device by domain, bus, slot, func and also if
  264. * any of the device's parent bridges match.
  265. */
  266. for (; dev != NULL; dev = dev->bus->self) {
  267. if (pci_domain_nr(dev->bus) == pdev_id->domain
  268. && dev->bus->number == pdev_id->bus
  269. && dev->devfn == pdev_id->devfn)
  270. return 1;
  271. /* Sometimes topmost bridge links to itself. */
  272. if (dev == dev->bus->self)
  273. break;
  274. }
  275. return 0;
  276. }
  277. static int pcistub_match(struct pci_dev *dev)
  278. {
  279. struct pcistub_device_id *pdev_id;
  280. unsigned long flags;
  281. int found = 0;
  282. spin_lock_irqsave(&device_ids_lock, flags);
  283. list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
  284. if (pcistub_match_one(dev, pdev_id)) {
  285. found = 1;
  286. break;
  287. }
  288. }
  289. spin_unlock_irqrestore(&device_ids_lock, flags);
  290. return found;
  291. }
/*
 * One-time preparation of a seized device: allocate per-device data,
 * build the emulated config space, enable the device to discover its
 * real resources, snapshot its config space, then reset and disable it
 * so it is clean for export.  Returns 0 or a negative errno.
 */
static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices (yet). If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
	/* Trailing space holds the irq_name built below. */
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
				+ strlen(pci_name(dev)) + 1, GFP_KERNEL);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Setup name for fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the pci device's true irq (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

	if (dev->msix_cap) {
		/* Ask Xen to prepare MSI-X state while dom0 still owns
		 * the device; undone in pcistub_device_release(). */
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err && err != -ENOSYS)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}
	/* Now disable the device (this also ensures some private device
	 * data is setup before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	pci_set_dev_assigned(dev);
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}
/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		/* Drop the lock: pcistub_init_device() may sleep. */
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			/* NOTE(review): this frees the wrapper without a
			 * pcistub_device_put(), so the pci_dev reference
			 * taken in pcistub_device_alloc() looks leaked on
			 * this error path - confirm. */
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	/* From here on, pcistub_seize() initializes devices directly. */
	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}
  398. static void pcistub_device_id_add_list(struct pcistub_device_id *new,
  399. int domain, int bus, unsigned int devfn)
  400. {
  401. struct pcistub_device_id *pci_dev_id;
  402. unsigned long flags;
  403. int found = 0;
  404. spin_lock_irqsave(&device_ids_lock, flags);
  405. list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
  406. if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
  407. pci_dev_id->devfn == devfn) {
  408. found = 1;
  409. break;
  410. }
  411. }
  412. if (!found) {
  413. new->domain = domain;
  414. new->bus = bus;
  415. new->devfn = devfn;
  416. list_add_tail(&new->slot_list, &pcistub_device_ids);
  417. }
  418. spin_unlock_irqrestore(&device_ids_lock, flags);
  419. if (found)
  420. kfree(new);
  421. }
/*
 * Take ownership of @dev.  If late initialization has already run the
 * device is initialized immediately; otherwise it is parked on
 * seized_devices for pcistub_init_devices_late().  @pci_dev_id (may be
 * NULL) is consumed: added to the slot list on success, freed on
 * failure.  Returns 0 or a negative errno.
 */
static int pcistub_seize(struct pci_dev *dev,
			 struct pcistub_device_id *pci_dev_id)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev) {
		kfree(pci_dev_id);
		return -ENOMEM;
	}

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err) {
		kfree(pci_dev_id);
		pcistub_device_put(psdev);
	} else if (pci_dev_id)
		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
					   dev->bus->number, dev->devfn);

	return err;
}
/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0, match;
	struct pcistub_device_id *pci_dev_id = NULL;

	dev_dbg(&dev->dev, "probing...\n");

	match = pcistub_match(dev);

	/* Seize the device if it is on the configured slot list, or if it
	 * was explicitly bound to us via sysfs driver_override. */
	if ((dev->driver_override &&
	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
	    match) {

		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		if (!match) {
			/* driver_override bind: record this slot so future
			 * probes of the same BDF also match. */
			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
			if (!pci_dev_id) {
				err = -ENOMEM;
				goto out;
			}
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev, pci_dev_id);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}
/* Called when 'unbind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove %s\n",
			found_psdev->pdev ? "- in-use" : "");

		if (found_psdev->pdev) {
			/* A guest is still using the device: warn loudly,
			 * then force pciback to release it. */
			int domid = xen_find_device_domain_owner(dev);

			dev_warn(&dev->dev, "****** removing device %s while still in-use by domain %d! ******\n",
			       pci_name(found_psdev->dev), domid);
			dev_warn(&dev->dev, "****** driver domain may still access this device's i/o resources!\n");
			dev_warn(&dev->dev, "****** shutdown driver domain before binding device\n");
			dev_warn(&dev->dev, "****** to other drivers or domains\n");

			/* N.B. This ends up calling pcistub_put_pci_dev which ends up
			 * doing the FLR. */
			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev,
						  false /* caller holds the lock. */);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}
/* Match every PCI device; the real filtering happens in pcistub_probe(). */
static const struct pci_device_id pcistub_ids[] = {
	{
	 .vendor = PCI_ANY_ID,
	 .device = PCI_ANY_ID,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{0,},
};
#define PCI_NODENAME_MAX 40

/*
 * Ask the toolstack to shut down the guest owning @psdev by writing
 * "aerfail" into the backend's xenstore aerState node.  Used when the
 * guest cannot service an AER recovery request.
 */
static void kill_domain_by_device(struct pcistub_device *psdev)
{
	struct xenbus_transaction xbt;
	int err;
	char nodename[PCI_NODENAME_MAX];

	BUG_ON(!psdev);
	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
		psdev->pdev->xdev->otherend_id);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		dev_err(&psdev->dev->dev,
			"error %d when start xenbus transaction\n", err);
		return;
	}
	/*PV AER handlers will set this flag*/
	xenbus_printf(xbt, nodename, "aerState" , "aerfail");
	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		/* -EAGAIN: the transaction raced with another writer. */
		if (err == -EAGAIN)
			goto again;
		dev_err(&psdev->dev->dev,
			"error %d when end xenbus transaction\n", err);
		return;
	}
}
/* For each aer recovery step error_detected, mmio_enabled, etc, front_end and
 * backend need to have cooperation. In xen_pcibk, those steps will do similar
 * jobs: send service request and waiting for front_end response.
 *
 * Fills the shared-ring aer_op with @aer_cmd/@state, notifies the
 * frontend, and waits (up to 300s) for it to ack by clearing
 * _XEN_PCIB_active.  Returns the frontend's verdict, or @result /
 * PCI_ERS_RESULT_NONE on failure paths.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	struct xen_pcibk_device *pdev = psdev->pdev;
	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
	int ret;

	/*with PV AER drivers*/
	aer_op = &(sh_info->aer_op);
	aer_op->cmd = aer_cmd ;
	/*useful for error_detected callback*/
	aer_op->err = state;
	/*pcifront_end BDF*/
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus, &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev, "failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();	/* publish the request before raising the flags below */

	dev_dbg(&psdev->dev->dev, "aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
	/*local flag to mark there's aer request, xen_pcibk callback will use
	 * this flag to judge whether we need to check pci-front give aer
	 * service ack signal
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/*It is possible that a pcifront conf_read_write ops request invokes
	 * the callback which cause the spurious execution of wake_up.
	 * Yet it is harmless and better than a spinlock here
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&sh_info->flags);
	wmb();	/* flag must be visible before the event channel fires */
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Enable IRQ to signal "request done". */
	xen_pcibk_lateeoi(pdev, 0);

	/* Wait for the frontend to clear _XEN_PCIB_active (ack). */
	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &sh_info->flags)), 300*HZ);

	/* Enable IRQ for pcifront request if not already active. */
	if (!test_bit(_PDEVF_op_active, &pdev->flags))
		xen_pcibk_lateeoi(pdev, 0);

	if (!ret) {
		/* Timed out; if the flag is still set, the frontend never
		 * serviced the request - report NONE via aer_op->err. */
		if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
				(unsigned long *)&sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/* The frontend wrote its verdict into aer_op->err. */
	res = (pci_ers_result_t)aer_op->err;
	return res;
}
/*
 * xen_pcibk_slot_reset: it will send the slot_reset request to pcifront in case
 * of the device driver could provide this service, and then wait for pcifront
 * ack.
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	/* Serialize against device removal/reconfiguration. */
	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev, "device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, "device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}
/*xen_pcibk_mmio_enabled: it will send the mmio_enabled request to pcifront
 * in case of the device driver could provide this service, and then wait
 * for pcifront ack
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	/* Serialize against device removal/reconfiguration. */
	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev, "device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, "device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_mmio, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER mmio_enabled service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}
/*
 * xen_pcibk_error_detected: forward the error_detected request to pcifront
 * when the guest's device driver provides this service, and then wait for
 * the pcifront ack.
 * @dev: pointer to the PCI device
 * @error: the current PCI connection state
 *
 * The return value is used by the AER core do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
	pci_channel_state_t error)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	/* Optimistic default; only downgraded by the guest's answer below. */
	result = PCI_ERS_RESULT_CAN_RECOVER;
	dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		/* Not on the stub list, or not assigned to any guest. */
		dev_err(&dev->dev, "device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		/* No shared info page: frontend never connected. */
		dev_err(&dev->dev, "device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	/* Guest owns the device yet no AER handler registered: kill guest. */
	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER error_detected service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}
  772. /*xen_pcibk_error_resume: it will send the error_resume request to pcifront
  773. * in case of the device driver could provide this service, and then wait
  774. * for pcifront ack.
  775. * @dev: pointer to PCI devices
  776. */
  777. static void xen_pcibk_error_resume(struct pci_dev *dev)
  778. {
  779. struct pcistub_device *psdev;
  780. dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
  781. dev->bus->number, dev->devfn);
  782. down_write(&pcistub_sem);
  783. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  784. dev->bus->number,
  785. PCI_SLOT(dev->devfn),
  786. PCI_FUNC(dev->devfn));
  787. if (!psdev || !psdev->pdev) {
  788. dev_err(&dev->dev, "device is not found/assigned\n");
  789. goto end;
  790. }
  791. if (!psdev->pdev->sh_info) {
  792. dev_err(&dev->dev, "device is not connected or owned"
  793. " by HVM, kill it\n");
  794. kill_domain_by_device(psdev);
  795. goto end;
  796. }
  797. if (!test_bit(_XEN_PCIB_AERHANDLER,
  798. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  799. dev_err(&dev->dev,
  800. "guest with no AER driver should have been killed\n");
  801. kill_domain_by_device(psdev);
  802. goto end;
  803. }
  804. common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_resume,
  805. PCI_ERS_RESULT_RECOVERED);
  806. end:
  807. if (psdev)
  808. pcistub_device_put(psdev);
  809. up_write(&pcistub_sem);
  810. return;
  811. }
/* xen_pcibk AER handling: proxy each recovery step to the owning guest. */
static const struct pci_error_handlers xen_pcibk_error_handler = {
	.error_detected = xen_pcibk_error_detected,
	.mmio_enabled = xen_pcibk_mmio_enabled,
	.slot_reset = xen_pcibk_slot_reset,
	.resume = xen_pcibk_error_resume,
};
/*
 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
 * for a normal device. I don't want it to be loaded automatically.
 */
static struct pci_driver xen_pcibk_pci_driver = {
	/* The name should be xen_pciback, but until the tools are updated
	 * we will keep it as pciback. */
	.name = PCISTUB_DRIVER_NAME,
	.id_table = pcistub_ids,
	.probe = pcistub_probe,
	.remove = pcistub_remove,
	.err_handler = &xen_pcibk_error_handler,
};
  832. static inline int str_to_slot(const char *buf, int *domain, int *bus,
  833. int *slot, int *func)
  834. {
  835. int parsed = 0;
  836. switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
  837. &parsed)) {
  838. case 3:
  839. *func = -1;
  840. sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
  841. break;
  842. case 2:
  843. *slot = *func = -1;
  844. sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
  845. break;
  846. }
  847. if (parsed && !buf[parsed])
  848. return 0;
  849. /* try again without domain */
  850. *domain = 0;
  851. switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
  852. case 2:
  853. *func = -1;
  854. sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
  855. break;
  856. case 1:
  857. *slot = *func = -1;
  858. sscanf(buf, " %x:*.* %n", bus, &parsed);
  859. break;
  860. }
  861. if (parsed && !buf[parsed])
  862. return 0;
  863. return -EINVAL;
  864. }
  865. static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
  866. *slot, int *func, int *reg, int *size, int *mask)
  867. {
  868. int parsed = 0;
  869. sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
  870. reg, size, mask, &parsed);
  871. if (parsed && !buf[parsed])
  872. return 0;
  873. /* try again without domain */
  874. *domain = 0;
  875. sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
  876. mask, &parsed);
  877. if (parsed && !buf[parsed])
  878. return 0;
  879. return -EINVAL;
  880. }
/*
 * Add a device address to the seize list.  A negative slot or func is a
 * wildcard and expands (recursively) to every slot (0..31) or function
 * (0..7) on the given bus.
 *
 * Returns 0 on success, -EINVAL for an out-of-range address, -ENOMEM on
 * allocation failure.
 */
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id;
	int rc = 0, devfn = PCI_DEVFN(slot, func);

	/* Expand a wildcard slot over all 32 possible slots. */
	if (slot < 0) {
		for (slot = 0; !rc && slot < 32; ++slot)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	/* Expand a wildcard function over all 8 possible functions. */
	if (func < 0) {
		for (func = 0; !rc && func < 8; ++func)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	/*
	 * Validate before allocating.  The PCI_SLOT/PCI_FUNC round-trip
	 * rejects slot/func values that overflow their devfn bit-fields.
	 * When PCI domains are unsupported, only domain 0 is acceptable.
	 */
	if ((
#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
    || !defined(CONFIG_PCI_DOMAINS)
	     !pci_domains_supported ? domain :
#endif
	     domain < 0 || domain > 0xffff)
	    || bus < 0 || bus > 0xff
	    || PCI_SLOT(devfn) != slot
	    || PCI_FUNC(devfn) != func)
		return -EINVAL;

	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
	if (!pci_dev_id)
		return -ENOMEM;

	pr_debug("wants to seize %04x:%02x:%02x.%d\n",
		 domain, bus, slot, func);

	pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);

	return 0;
}
/*
 * Remove matching entries from the seize list.  A negative slot or func
 * acts as a wildcard that matches any value.
 *
 * Returns 0 if at least one entry was removed, -ENOENT otherwise.
 */
static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id, *t;
	int err = -ENOENT;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
				 slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
		    && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
		    && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
			/* Don't break; here because it's possible the same
			 * slot could be in the list more than once
			 */
			list_del(&pci_dev_id->slot_list);
			kfree(pci_dev_id);

			err = 0;

			pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
				 domain, bus, slot, func);
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return err;
}
/*
 * Register a config-space quirk field (offset/size/mask) for the stub
 * device at domain:bus:slot.func, via xen_pcibk_config_quirks_add_field().
 *
 * Rejects offsets beyond the 4 KiB extended config space and masks with
 * bits set beyond the field's size.  Returns 0 on success or a negative
 * errno.
 */
static int pcistub_reg_add(int domain, int bus, int slot, int func,
			   unsigned int reg, unsigned int size,
			   unsigned int mask)
{
	int err = 0;
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct config_field *field;

	/* mask >> (size * 8) is non-zero iff mask has bits above the field. */
	if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
		return -EINVAL;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}
	/* NOTE(review): psdev->dev is assumed non-NULL here — confirm that
	 * pcistub_device_find() only returns devices with a bound pci_dev. */
	dev = psdev->dev;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field) {
		err = -ENOMEM;
		goto out;
	}

	field->offset = reg;
	field->size = size;
	field->mask = mask;
	field->init = NULL;
	field->reset = NULL;
	field->release = NULL;
	field->clean = xen_pcibk_config_field_free;

	err = xen_pcibk_config_quirks_add_field(dev, field);
	if (err)
		kfree(field);
out:
	if (psdev)
		pcistub_device_put(psdev);
	return err;
}
  973. static ssize_t new_slot_store(struct device_driver *drv, const char *buf,
  974. size_t count)
  975. {
  976. int domain, bus, slot, func;
  977. int err;
  978. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  979. if (err)
  980. goto out;
  981. err = pcistub_device_id_add(domain, bus, slot, func);
  982. out:
  983. if (!err)
  984. err = count;
  985. return err;
  986. }
  987. static DRIVER_ATTR_WO(new_slot);
  988. static ssize_t remove_slot_store(struct device_driver *drv, const char *buf,
  989. size_t count)
  990. {
  991. int domain, bus, slot, func;
  992. int err;
  993. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  994. if (err)
  995. goto out;
  996. err = pcistub_device_id_remove(domain, bus, slot, func);
  997. out:
  998. if (!err)
  999. err = count;
  1000. return err;
  1001. }
  1002. static DRIVER_ATTR_WO(remove_slot);
/*
 * sysfs "slots" attribute: list every entry currently on the seize list,
 * one "dddd:bb:ss.f" per line, truncated at PAGE_SIZE.
 */
static ssize_t slots_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device_id *pci_dev_id;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (count >= PAGE_SIZE)
			break;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%04x:%02x:%02x.%d\n",
				   pci_dev_id->domain, pci_dev_id->bus,
				   PCI_SLOT(pci_dev_id->devfn),
				   PCI_FUNC(pci_dev_id->devfn));
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR_RO(slots);
/*
 * sysfs "irq_handlers" attribute: for every bound stub device, report the
 * fake IRQ handler state ("<dev>:<on|off>:<ack|not ack>ing:<count>"),
 * truncated at PAGE_SIZE.
 */
static ssize_t irq_handlers_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		/* Skip entries without a bound device or driver data. */
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count,
			      "%s:%s:%sing:%ld\n",
			      pci_name(psdev->dev),
			      dev_data->isr_on ? "on" : "off",
			      dev_data->ack_intr ? "ack" : "not ack",
			      dev_data->handled);
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR_RO(irq_handlers);
/*
 * sysfs "irq_handler_state" attribute: toggle the fake IRQ handler (isr_on)
 * for the device named by the written slot spec.  Turning the handler on
 * also (re-)enables interrupt acking.
 */
static ssize_t irq_handler_state_store(struct device_driver *drv,
				       const char *buf, size_t count)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		return err;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENOENT;
		goto out;
	}

	/* NOTE(review): psdev->dev is assumed non-NULL here — confirm that
	 * pcistub_device_find() only returns devices with a bound pci_dev. */
	dev_data = pci_get_drvdata(psdev->dev);
	if (!dev_data) {
		err = -ENOENT;
		goto out;
	}

	dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
		dev_data->irq_name, dev_data->isr_on,
		!dev_data->isr_on);

	dev_data->isr_on = !(dev_data->isr_on);
	if (dev_data->isr_on)
		dev_data->ack_intr = 1;
out:
	if (psdev)
		pcistub_device_put(psdev);
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR_WO(irq_handler_state);
  1083. static ssize_t quirks_store(struct device_driver *drv, const char *buf,
  1084. size_t count)
  1085. {
  1086. int domain, bus, slot, func, reg, size, mask;
  1087. int err;
  1088. err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
  1089. &mask);
  1090. if (err)
  1091. goto out;
  1092. err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
  1093. out:
  1094. if (!err)
  1095. err = count;
  1096. return err;
  1097. }
/*
 * sysfs "quirks" show: dump every registered quirk — the device address and
 * IDs on one line, then each of its config fields (offset:size:mask) on
 * following indented lines — truncated at PAGE_SIZE.
 */
static ssize_t quirks_show(struct device_driver *drv, char *buf)
{
	int count = 0;
	unsigned long flags;
	struct xen_pcibk_config_quirk *quirk;
	struct xen_pcibk_dev_data *dev_data;
	const struct config_field *field;
	const struct config_field_entry *cfg_entry;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
		if (count >= PAGE_SIZE)
			goto out;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
				   quirk->pdev->bus->number,
				   PCI_SLOT(quirk->pdev->devfn),
				   PCI_FUNC(quirk->pdev->devfn),
				   quirk->devid.vendor, quirk->devid.device,
				   quirk->devid.subvendor,
				   quirk->devid.subdevice);

		/* NOTE(review): dev_data is dereferenced without a NULL
		 * check; assumes every quirk's pdev has drvdata — confirm. */
		dev_data = pci_get_drvdata(quirk->pdev);

		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
			field = cfg_entry->field;
			if (count >= PAGE_SIZE)
				goto out;

			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "\t\t%08x:%01x:%08x\n",
					   cfg_entry->base_offset +
					   field->offset, field->size,
					   field->mask);
		}
	}

out:
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR_RW(quirks);
/*
 * sysfs "permissive" store: put the device named by the written slot spec
 * into permissive config-space mode.  This is one-way — there is no code
 * path here to clear the flag again — and a warning is logged because it
 * weakens the config-space filtering.
 */
static ssize_t permissive_store(struct device_driver *drv, const char *buf,
				size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}
	if (!dev_data->permissive) {
		dev_data->permissive = 1;
		/* Let user know that what they're doing could be unsafe */
		dev_warn(&psdev->dev->dev, "enabling permissive mode "
			 "configuration space accesses!\n");
		dev_warn(&psdev->dev->dev,
			 "permissive mode is potentially unsafe!\n");
	}
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}
/*
 * sysfs "permissive" show: list the PCI name of every stub device that has
 * permissive mode enabled, one per line, truncated at PAGE_SIZE.
 */
static ssize_t permissive_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data || !dev_data->permissive)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
			      pci_name(psdev->dev));
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR_RW(permissive);
/*
 * sysfs "allow_interrupt_control" store: set the allow_interrupt_control
 * flag for the device named by the written slot spec.  The flag is
 * presumably consulted by the config-space access handlers — confirm
 * against xen_pcibk_config code.  Like "permissive", this is one-way.
 */
static ssize_t allow_interrupt_control_store(struct device_driver *drv,
					     const char *buf, size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}
	dev_data->allow_interrupt_control = 1;
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}
/*
 * sysfs "allow_interrupt_control" show: list the PCI name of every stub
 * device with the flag set, one per line, truncated at PAGE_SIZE.
 */
static ssize_t allow_interrupt_control_show(struct device_driver *drv,
					    char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data || !dev_data->allow_interrupt_control)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
			      pci_name(psdev->dev));
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR_RW(allow_interrupt_control);
/*
 * Remove every driver sysfs attribute created by pcistub_init(), then
 * unregister the stub PCI driver.  Also used by pcistub_init() itself to
 * unwind after a partial attribute-creation failure (driver_remove_file()
 * tolerates attributes that were never created).
 */
static void pcistub_exit(void)
{
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_remove_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_permissive);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_allow_interrupt_control);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handlers);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handler_state);
	pci_unregister_driver(&xen_pcibk_pci_driver);
}
/*
 * Parse the "pci_devs_to_hide" parameter — a sequence of "(dddd:bb:ss.f)"
 * segments where the domain may be omitted and the slot and/or function may
 * be the wildcard '*' — seizing every named device, then register the stub
 * PCI driver and create its sysfs control files.
 */
static int __init pcistub_init(void)
{
	int pos = 0;
	int err = 0;
	int domain, bus, slot, func;
	int parsed;

	if (pci_devs_to_hide && *pci_devs_to_hide) {
		do {
			parsed = 0;

			/* First try the fully qualified form with a domain;
			 * sscanf's %n records how much input was consumed. */
			err = sscanf(pci_devs_to_hide + pos,
				     " (%x:%x:%x.%x) %n",
				     &domain, &bus, &slot, &func, &parsed);
			switch (err) {
			case 3:
				/* "(dddd:bb:ss.*)" — wildcard function. */
				func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:%x.*) %n",
				       &domain, &bus, &slot, &parsed);
				break;
			case 2:
				/* "(dddd:bb:*.*)" — wildcard slot+function. */
				slot = func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:*.*) %n",
				       &domain, &bus, &parsed);
				break;
			}

			if (!parsed) {
				/* Retry without a domain; it defaults to 0. */
				domain = 0;
				err = sscanf(pci_devs_to_hide + pos,
					     " (%x:%x.%x) %n",
					     &bus, &slot, &func, &parsed);
				switch (err) {
				case 2:
					func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:%x.*) %n",
					       &bus, &slot, &parsed);
					break;
				case 1:
					slot = func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:*.*) %n",
					       &bus, &parsed);
					break;
				}
			}

			if (parsed <= 0)
				goto parse_error;

			err = pcistub_device_id_add(domain, bus, slot, func);
			if (err)
				goto out;

			pos += parsed;
		} while (pci_devs_to_hide[pos]);
	}

	/* If we're the first PCI Device Driver to register, we're the
	 * first one to get offered PCI devices as they become
	 * available (and thus we can be the first to grab them)
	 */
	err = pci_register_driver(&xen_pcibk_pci_driver);
	if (err < 0)
		goto out;

	err = driver_create_file(&xen_pcibk_pci_driver.driver,
				 &driver_attr_new_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_remove_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_slots);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_quirks);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_permissive);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_allow_interrupt_control);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handlers);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handler_state);
	/* Any attribute failure unwinds everything, driver included. */
	if (err)
		pcistub_exit();

out:
	return err;

parse_error:
	pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
	       pci_devs_to_hide + pos);
	return -EINVAL;
}
  1357. #ifndef MODULE
  1358. /*
  1359. * fs_initcall happens before device_initcall
  1360. * so xen_pcibk *should* get called first (b/c we
  1361. * want to suck up any device before other drivers
  1362. * get a chance by being the first pci device
  1363. * driver to register)
  1364. */
  1365. fs_initcall(pcistub_init);
  1366. #endif
  1367. #ifdef CONFIG_PCI_IOV
  1368. static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
  1369. {
  1370. struct pcistub_device *psdev = NULL;
  1371. unsigned long flags;
  1372. bool found = false;
  1373. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1374. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1375. if (!psdev->pdev && psdev->dev != pdev
  1376. && pci_physfn(psdev->dev) == pdev) {
  1377. found = true;
  1378. break;
  1379. }
  1380. }
  1381. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1382. if (found)
  1383. return psdev;
  1384. return NULL;
  1385. }
/*
 * PCI bus notifier callback: when a driver is being unbound from a physical
 * function, release every unassigned VF stub device belonging to that PF —
 * presumably so the PF driver can tear down SR-IOV cleanly (confirm against
 * the SR-IOV disable path).
 */
static int pci_stub_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;
	const struct pci_dev *pdev = to_pci_dev(dev);

	/* Only act on driver-unbind events for physical functions. */
	if (action != BUS_NOTIFY_UNBIND_DRIVER)
		return NOTIFY_DONE;

	if (!pdev->is_physfn)
		return NOTIFY_DONE;

	/* Release VFs one at a time until none remain. */
	for (;;) {
		struct pcistub_device *psdev = find_vfs(pdev);
		if (!psdev)
			break;
		device_release_driver(&psdev->dev->dev);
	}
	return NOTIFY_DONE;
}
/* Registered on pci_bus_type in xen_pcibk_init() to catch PF unbinds. */
static struct notifier_block pci_stub_nb = {
	.notifier_call = pci_stub_notifier,
};
  1406. #endif
/*
 * Module entry point: initialize the config-space layer, (when built as a
 * module) run pcistub_init, finish late device setup, and register with
 * xenbus.  Only meaningful in the privileged (initial) domain.
 */
static int __init xen_pcibk_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = xen_pcibk_config_init();
	if (err)
		return err;

#ifdef MODULE
	/* When built in, pcistub_init already ran as an fs_initcall. */
	err = pcistub_init();
	if (err < 0)
		return err;
#endif
	pcistub_init_devices_late();
	err = xen_pcibk_xenbus_register();
	if (err)
		pcistub_exit();
#ifdef CONFIG_PCI_IOV
	else
		bus_register_notifier(&pci_bus_type, &pci_stub_nb);
#endif

	return err;
}
/*
 * Module exit: undo xen_pcibk_init() in reverse order — drop the SR-IOV
 * bus notifier, unregister from xenbus, then tear down the stub driver.
 */
static void __exit xen_pcibk_cleanup(void)
{
#ifdef CONFIG_PCI_IOV
	bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
#endif
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
}
  1438. module_init(xen_pcibk_init);
  1439. module_exit(xen_pcibk_cleanup);
  1440. MODULE_LICENSE("Dual BSD/GPL");
  1441. MODULE_ALIAS("xen-backend:pci");