pci-driver.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <[email protected]>
  4. * (C) Copyright 2007 Novell Inc.
  5. */
  6. #include <linux/pci.h>
  7. #include <linux/module.h>
  8. #include <linux/init.h>
  9. #include <linux/device.h>
  10. #include <linux/mempolicy.h>
  11. #include <linux/string.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/sched/isolation.h>
  15. #include <linux/cpu.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/suspend.h>
  18. #include <linux/kexec.h>
  19. #include <linux/of_device.h>
  20. #include <linux/acpi.h>
  21. #include <linux/dma-map-ops.h>
  22. #include <linux/iommu.h>
  23. #include "pci.h"
  24. #include "pcie/portdrv.h"
/*
 * A dynamically added device ID.  Entries live on pci_driver.dynids.list
 * and are matched in addition to the driver's static id_table.
 */
struct pci_dynid {
	struct list_head node;		/* link in drv->dynids.list */
	struct pci_device_id id;	/* the ID to match against */
};
  29. /**
  30. * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
  31. * @drv: target pci driver
  32. * @vendor: PCI vendor ID
  33. * @device: PCI device ID
  34. * @subvendor: PCI subvendor ID
  35. * @subdevice: PCI subdevice ID
  36. * @class: PCI class
  37. * @class_mask: PCI class mask
  38. * @driver_data: private driver data
  39. *
  40. * Adds a new dynamic pci device ID to this driver and causes the
  41. * driver to probe for all devices again. @drv must have been
  42. * registered prior to calling this function.
  43. *
  44. * CONTEXT:
  45. * Does GFP_KERNEL allocation.
  46. *
  47. * RETURNS:
  48. * 0 on success, -errno on failure.
  49. */
  50. int pci_add_dynid(struct pci_driver *drv,
  51. unsigned int vendor, unsigned int device,
  52. unsigned int subvendor, unsigned int subdevice,
  53. unsigned int class, unsigned int class_mask,
  54. unsigned long driver_data)
  55. {
  56. struct pci_dynid *dynid;
  57. dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
  58. if (!dynid)
  59. return -ENOMEM;
  60. dynid->id.vendor = vendor;
  61. dynid->id.device = device;
  62. dynid->id.subvendor = subvendor;
  63. dynid->id.subdevice = subdevice;
  64. dynid->id.class = class;
  65. dynid->id.class_mask = class_mask;
  66. dynid->id.driver_data = driver_data;
  67. spin_lock(&drv->dynids.lock);
  68. list_add_tail(&dynid->node, &drv->dynids.list);
  69. spin_unlock(&drv->dynids.lock);
  70. return driver_attach(&drv->driver);
  71. }
  72. EXPORT_SYMBOL_GPL(pci_add_dynid);
  73. static void pci_free_dynids(struct pci_driver *drv)
  74. {
  75. struct pci_dynid *dynid, *n;
  76. spin_lock(&drv->dynids.lock);
  77. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  78. list_del(&dynid->node);
  79. kfree(dynid);
  80. }
  81. spin_unlock(&drv->dynids.lock);
  82. }
  83. /**
  84. * pci_match_id - See if a PCI device matches a given pci_id table
  85. * @ids: array of PCI device ID structures to search in
  86. * @dev: the PCI device structure to match against.
  87. *
  88. * Used by a driver to check whether a PCI device is in its list of
  89. * supported devices. Returns the matching pci_device_id structure or
  90. * %NULL if there is no match.
  91. *
  92. * Deprecated; don't use this as it will not catch any dynamic IDs
  93. * that a driver might want to check for.
  94. */
  95. const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
  96. struct pci_dev *dev)
  97. {
  98. if (ids) {
  99. while (ids->vendor || ids->subvendor || ids->class_mask) {
  100. if (pci_match_one_device(ids, dev))
  101. return ids;
  102. ids++;
  103. }
  104. }
  105. return NULL;
  106. }
  107. EXPORT_SYMBOL(pci_match_id);
/*
 * Wildcard ID handed back by pci_match_device() when driver_override
 * forces a bind but no static or dynamic entry actually matches.
 */
static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};
/**
 * pci_match_device - See if a device matches a driver's list of IDs
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device is in its list of
 * supported devices or in the dynids list, which may have been augmented
 * via the sysfs "new_id" file.  Returns the matching pci_device_id
 * structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL, *ids;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (found_id)
		return found_id;

	/* Walk the static table; pci_match_id() resumes after each hit. */
	for (ids = drv->id_table; (found_id = pci_match_id(ids, dev));
	     ids = found_id + 1) {
		/*
		 * The match table is split based on driver_override.
		 * In case override_only was set, enforce driver_override
		 * matching.
		 */
		if (found_id->override_only) {
			if (dev->driver_override)
				return found_id;
		} else {
			return found_id;
		}
	}

	/* driver_override will always match, send a dummy id */
	if (dev->driver_override)
		return &pci_device_id_any;
	return NULL;
}
/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	/* At least vendor and device are required; the rest default above. */
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		/*
		 * Not all fields were given: build a throwaway pci_dev and
		 * reject the new ID if it would duplicate an existing
		 * (static or dynamic) match for such a device.
		 */
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);

		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_device(pdrv, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
  221. /**
  222. * remove_id_store - remove a PCI device ID from this driver
  223. * @driver: target device driver
  224. * @buf: buffer for scanning device ID data
  225. * @count: input size
  226. *
  227. * Removes a dynamic pci device ID to this driver.
  228. */
  229. static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
  230. size_t count)
  231. {
  232. struct pci_dynid *dynid, *n;
  233. struct pci_driver *pdrv = to_pci_driver(driver);
  234. u32 vendor, device, subvendor = PCI_ANY_ID,
  235. subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
  236. int fields = 0;
  237. size_t retval = -ENODEV;
  238. fields = sscanf(buf, "%x %x %x %x %x %x",
  239. &vendor, &device, &subvendor, &subdevice,
  240. &class, &class_mask);
  241. if (fields < 2)
  242. return -EINVAL;
  243. spin_lock(&pdrv->dynids.lock);
  244. list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
  245. struct pci_device_id *id = &dynid->id;
  246. if ((id->vendor == vendor) &&
  247. (id->device == device) &&
  248. (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
  249. (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
  250. !((id->class ^ class) & class_mask)) {
  251. list_del(&dynid->node);
  252. kfree(dynid);
  253. retval = count;
  254. break;
  255. }
  256. }
  257. spin_unlock(&pdrv->dynids.lock);
  258. return retval;
  259. }
  260. static DRIVER_ATTR_WO(remove_id);
/* sysfs attributes (new_id, remove_id) exposed for every PCI driver */
static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);
/* Probe arguments bundled for passing through work_on_cpu(), see
 * pci_call_probe(). */
struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};
/* Bind @_ddi->drv to @_ddi->dev by calling the driver's ->probe(). */
static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		/* Probe failed: unbind and undo the runtime-PM get above. */
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
		 rc);
	return 0;
}
/*
 * Whether @dev is a VF whose physical function is currently being probed.
 * Without SR-IOV support there are no VFs, so this is always false.
 */
static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}
/* Run the driver's probe for @dev, preferably on a CPU local to the device. */
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev)) {
		/* cpu == nr_cpu_ids means: probe directly on this CPU. */
		cpu = nr_cpu_ids;
	} else {
		cpumask_var_t wq_domain_mask;

		if (!zalloc_cpumask_var(&wq_domain_mask, GFP_KERNEL)) {
			error = -ENOMEM;
			goto out;
		}
		/* Restrict the choice to housekeeping (WQ and DOMAIN) CPUs. */
		cpumask_and(wq_domain_mask,
			    housekeeping_cpumask(HK_TYPE_WQ),
			    housekeeping_cpumask(HK_TYPE_DOMAIN));

		cpu = cpumask_any_and(cpumask_of_node(node),
				      wq_domain_mask);
		free_cpumask_var(wq_domain_mask);
	}

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);
out:
	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}
  356. /**
  357. * __pci_device_probe - check if a driver wants to claim a specific PCI device
  358. * @drv: driver to call to check if it wants the PCI device
  359. * @pci_dev: PCI device being probed
  360. *
  361. * returns 0 on success, else error.
  362. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
  363. */
  364. static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
  365. {
  366. const struct pci_device_id *id;
  367. int error = 0;
  368. if (drv->probe) {
  369. error = -ENODEV;
  370. id = pci_match_device(drv, pci_dev);
  371. if (id)
  372. error = pci_call_probe(drv, pci_dev, id);
  373. }
  374. return error;
  375. }
/* Arch hook called before probe; the default does nothing and succeeds. */
int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

/* Arch hook, counterpart of pcibios_alloc_irq(); default does nothing. */
void __weak pcibios_free_irq(struct pci_dev *dev)
{
}
#ifdef CONFIG_PCI_IOV
/*
 * A VF may only be probed when the PF allows it (drivers_autoprobe) or
 * when userspace explicitly requested a driver via driver_override.
 */
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
		pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif
/* Bus-level ->probe() callback: set up the IRQ and try to bind a driver. */
static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;

	pci_assign_irq(pci_dev);
	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	/* Hold a device reference for the duration of the binding. */
	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error) {
		/* No match or probe failure: undo the IRQ and the reference. */
		pcibios_free_irq(pci_dev);
		pci_dev_put(pci_dev);
	}

	return error;
}
/* Bus-level ->remove() callback: unbind the driver and undo probe-time state. */
static void pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv->remove) {
		/* Keep the device powered while the driver's remove() runs. */
		pm_runtime_get_sync(dev);
		drv->remove(pci_dev);
		pm_runtime_put_noidle(dev);
	}
	pcibios_free_irq(pci_dev);
	pci_dev->driver = NULL;
	pci_iov_remove(pci_dev);

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	/* Drops the reference taken in pci_device_probe(). */
	pci_dev_put(pci_dev);
}
/* Bus-level ->shutdown() callback, run at reboot/kexec time. */
static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	/* The device must be awake before the driver's shutdown() is called. */
	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}
#ifdef CONFIG_PM_SLEEP

/* Auxiliary functions used for system resume */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	/* The device must be in D0 before its config space is restored. */
	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);
		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume */

/* Run resume fixups and disable wakeup signaling for the D0 state. */
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

/* Force the device into D0 and refresh our view of its power state. */
static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_update_current_state(pci_dev, PCI_D0);
}

/* Power up, then restore config space and the PME setting. */
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_pm_power_up_and_verify_state(pci_dev);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
}

static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
{
	pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
	/*
	 * When powering on a bridge from D3cold, the whole hierarchy may be
	 * powered on into D0uninitialized state, resume them to give them a
	 * chance to suspend again
	 */
	pci_resume_bus(pci_dev->subordinate);
}

#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, re-enable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}
/* Invoke a driver's legacy ->suspend() callback, if any, and run fixups. */
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(dev, drv->suspend, error);
		if (error)
			return error;

		/*
		 * Warn if the driver changed the device's power state but
		 * did not save its config space.
		 */
		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: Device state not saved by %pS\n",
				      drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}
/* Late-suspend phase for drivers using the legacy PM callbacks. */
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	/* Save config space unless the driver's suspend already did so. */
	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}
  570. static int pci_legacy_resume(struct device *dev)
  571. {
  572. struct pci_dev *pci_dev = to_pci_dev(dev);
  573. struct pci_driver *drv = pci_dev->driver;
  574. pci_fixup_device(pci_fixup_resume, pci_dev);
  575. return drv && drv->resume ?
  576. drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
  577. }
/* Auxiliary functions used by the new power management framework */

/* Default suspend when the driver provides no PM callbacks at all. */
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}
/* True if the bound driver uses the legacy ->suspend()/->resume() hooks. */
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->resume);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
		 pci_dev->vendor, pci_dev->device);

	return ret;
}
/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->prepare) {
		int error = pm->prepare(dev);
		if (error < 0)
			return error;

		/*
		 * A driver that set DPM_FLAG_SMART_PREPARE and returned 0
		 * from its ->prepare() is vetoing the direct-complete
		 * optimization, so don't return a positive value below.
		 */
		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	if (pci_dev_need_resume(pci_dev))
		return 0;

	/*
	 * The PME setting needs to be adjusted here in case the direct-complete
	 * optimization is used with respect to this device.
	 */
	pci_dev_adjust_pme(pci_dev);
	/* Positive return: let the PM core use direct-complete for us. */
	return 1;
}
/* Undo pci_pm_prepare() after the system transition is over. */
static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_refresh_power_state(pci_dev);
		/*
		 * On platforms with ACPI this check may also trigger for
		 * devices sharing power resources if one of those power
		 * resources has been activated as a result of a change of the
		 * power state of another device sharing it.  However, in that
		 * case it is also better to resume the device, in general.
		 */
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}
/* System-suspend ->suspend() phase for PCI devices. */
static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/* Recomputed each cycle; the noirq phase may set it again. */
	pci_dev->skip_bus_pm = false;

	/*
	 * Disabling PTM allows some systems, e.g., Intel mobile chips
	 * since Coffee Lake, to enter a lower-power PM state.
	 */
	pci_suspend_ptm(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(dev, pm->suspend, error);
		if (error)
			return error;

		/*
		 * Warn if the driver changed the device's power state but
		 * did not save its config space.
		 */
		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend);
		}
	}

	return 0;
}
/* System-suspend ->suspend_late() phase for PCI devices. */
static int pci_pm_suspend_late(struct device *dev)
{
	/* Direct-complete: the device stays runtime-suspended, nothing to do. */
	if (dev_pm_skip_suspend(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}
/*
 * The "noirq" phase of system suspend for a PCI device: save the device's
 * config space and, unless the driver callback or one of the "skip"
 * conditions below says otherwise, put the device into a low-power state.
 */
static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		/* No driver callbacks: just preserve the config space. */
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(dev, pm->suspend_noirq, error);
		if (error)
			return error;

		/*
		 * Warn if the callback changed the device's power state
		 * without saving its config space first, then leave the
		 * device alone (the callback is assumed to have handled it).
		 */
		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);

		/*
		 * If the device is a bridge with a child in D0 below it,
		 * it needs to stay in D0, so check skip_bus_pm to avoid
		 * putting it into a low-power state in that case.
		 */
		if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	if (pci_dev->current_state == PCI_D0) {
		pci_dev->skip_bus_pm = true;
		/*
		 * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
		 * downstream device is in D0, so avoid changing the power state
		 * of the parent bridge by setting the skip_bus_pm flag for it.
		 */
		if (pci_dev->bus->self)
			pci_dev->bus->self->skip_bus_pm = true;
	}

	if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
		pci_dbg(pci_dev, "PCI PM: Skipped\n");
		goto Fixup;
	}

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off. If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	if (device_can_wakeup(dev) && !device_may_wakeup(dev))
		dev->power.may_skip_resume = false;

	return 0;
}
/*
 * The "noirq" phase of system resume for a PCI device: bring the device
 * back up (unless it deliberately stayed in D0 across a suspend-to-idle
 * cycle), run the early resume fixups, and invoke the driver's
 * ->resume_noirq() callback if the device does not use legacy PM.
 */
static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	bool skip_bus_pm = pci_dev->skip_bus_pm;

	if (dev_pm_skip_resume(dev))
		return 0;

	/*
	 * In the suspend-to-idle case, devices left in D0 during suspend will
	 * stay in D0, so it is not necessary to restore or update their
	 * configuration here and attempting to put them into D0 again is
	 * pointless, so avoid doing that.
	 */
	if (!(skip_bus_pm && pm_suspend_no_platform()))
		pci_pm_default_resume_early(pci_dev);

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pcie_pme_root_status_cleanup(pci_dev);

	/* A device coming back from D3cold needs extra bring-up actions. */
	if (!skip_bus_pm && prev_state == PCI_D3cold)
		pci_pm_bridge_power_up_actions(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->resume_noirq)
		return pm->resume_noirq(dev);

	return 0;
}
  819. static int pci_pm_resume_early(struct device *dev)
  820. {
  821. if (dev_pm_skip_resume(dev))
  822. return 0;
  823. return pm_generic_resume_early(dev);
  824. }
/*
 * System resume for a PCI device: restore the standard config registers
 * when they were saved, then hand over to the legacy PM path or to the
 * driver's ->resume() callback (re-enabling the device itself when no
 * driver PM ops exist).
 */
static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	pci_resume_ptm(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			return pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}
#else /* !CONFIG_SUSPEND */

/* System suspend is not configured: provide no sleep callbacks at all. */
#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_early	NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS
/*
 * "Freeze" phase of hibernation for a PCI device: quiesce the device for
 * snapshot-image creation without putting it into a low-power state.
 */
static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		/* No driver PM ops: use the default suspend handling. */
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * Resume all runtime-suspended devices before creating a snapshot
	 * image of system memory, because the restore kernel generally cannot
	 * be expected to always handle them consistently and they need to be
	 * put into the runtime-active metastate during system resume anyway,
	 * so it is better to ensure that the state saved in the image will be
	 * always consistent with that.
	 */
	pm_runtime_resume(dev);
	pci_dev->state_saved = false;

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(dev, pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}
/*
 * "noirq" phase of freezing: run the driver's ->freeze_noirq() callback,
 * then make sure the config space has been saved and mark the device's
 * power state as unknown.
 */
static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (pm && pm->freeze_noirq) {
		int error;

		error = pm->freeze_noirq(dev);
		suspend_report_result(dev, pm->freeze_noirq, error);
		if (error)
			return error;
	}

	/* The callback may have saved the state already; don't redo it. */
	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	return 0;
}
/*
 * "noirq" phase of thawing after hibernation image handling: power the
 * device up, restore its state, and run the driver's ->thaw_noirq()
 * callback unless the device uses legacy PM.
 */
static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * The pm->thaw_noirq() callback assumes the device has been
	 * returned to D0 and its config state has been restored.
	 *
	 * In addition, pci_restore_state() restores MSI-X state in MMIO
	 * space, which requires the device to be in D0, so return it to D0
	 * in case the driver's "freeze" callbacks put it into a low-power
	 * state.
	 */
	pci_pm_power_up_and_verify_state(pci_dev);
	pci_restore_state(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->thaw_noirq)
		return pm->thaw_noirq(dev);

	return 0;
}
  924. static int pci_pm_thaw(struct device *dev)
  925. {
  926. struct pci_dev *pci_dev = to_pci_dev(dev);
  927. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  928. int error = 0;
  929. if (pci_has_legacy_pm_support(pci_dev))
  930. return pci_legacy_resume(dev);
  931. if (pm) {
  932. if (pm->thaw)
  933. error = pm->thaw(dev);
  934. } else {
  935. pci_pm_reenable_device(pci_dev);
  936. }
  937. pci_dev->state_saved = false;
  938. return error;
  939. }
/*
 * "Poweroff" phase of hibernation for a PCI device: mirrors
 * pci_pm_suspend() for the hibernation power-down transition.
 */
static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		/* No driver PM ops: use the default suspend handling. */
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(dev, pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}
  967. static int pci_pm_poweroff_late(struct device *dev)
  968. {
  969. if (dev_pm_skip_suspend(dev))
  970. return 0;
  971. pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
  972. return pm_generic_poweroff_late(dev);
  973. }
/*
 * "noirq" phase of the hibernation poweroff transition: run the driver's
 * ->poweroff_noirq() callback, put the device into a low-power state where
 * appropriate, and apply the late suspend fixups.
 */
static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (pm->poweroff_noirq) {
		int error;

		error = pm->poweroff_noirq(dev);
		suspend_report_result(dev, pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	/*
	 * Only devices without subordinate buses whose state was not saved
	 * by the callback are put into a low-power state here.
	 */
	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}
  1004. static int pci_pm_restore_noirq(struct device *dev)
  1005. {
  1006. struct pci_dev *pci_dev = to_pci_dev(dev);
  1007. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  1008. pci_pm_default_resume_early(pci_dev);
  1009. pci_fixup_device(pci_fixup_resume_early, pci_dev);
  1010. if (pci_has_legacy_pm_support(pci_dev))
  1011. return 0;
  1012. if (pm && pm->restore_noirq)
  1013. return pm->restore_noirq(dev);
  1014. return 0;
  1015. }
/*
 * "Restore" phase of resuming from a hibernation image: mirrors
 * pci_pm_resume() for the hibernation path.
 */
static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			return pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */

/* Hibernation support is not configured: provide no hibernation callbacks. */
#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM
/*
 * Runtime-suspend a PCI device: run the driver's ->runtime_suspend()
 * callback (if any), apply the suspend fixups, and save the config space
 * and complete the power transition unless the callback already did so.
 */
static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	pci_suspend_ptm(pci_dev);

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;
	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);
		/*
		 * -EBUSY and -EAGAIN is used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN) {
			pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		} else if (error) {
			pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	/*
	 * Warn if the callback changed the device's power state without
	 * saving its config space first, then leave the device as is.
	 */
	if (pm && pm->runtime_suspend
	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
			      "PCI PM: State of device not saved by %pS\n",
			      pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}
/*
 * Runtime-resume a PCI device: restore its config space, then run the
 * resume fixups and the driver's ->runtime_resume() callback if the device
 * is bound to a driver.
 */
static int pci_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	int error = 0;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_pm_default_resume_early(pci_dev);
	pci_resume_ptm(pci_dev);

	if (!pci_dev->driver)
		return 0;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_pm_default_resume(pci_dev);

	/* A device coming back from D3cold needs extra bring-up actions. */
	if (prev_state == PCI_D3cold)
		pci_pm_bridge_power_up_actions(pci_dev);

	if (pm && pm->runtime_resume)
		error = pm->runtime_resume(dev);

	return error;
}
  1121. static int pci_pm_runtime_idle(struct device *dev)
  1122. {
  1123. struct pci_dev *pci_dev = to_pci_dev(dev);
  1124. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  1125. /*
  1126. * If pci_dev->driver is not set (unbound), the device should
  1127. * always remain in D0 regardless of the runtime PM status
  1128. */
  1129. if (!pci_dev->driver)
  1130. return 0;
  1131. if (!pm)
  1132. return -ENOSYS;
  1133. if (pm->runtime_idle)
  1134. return pm->runtime_idle(dev);
  1135. return 0;
  1136. }
/* System sleep, hibernation, and runtime PM callbacks for PCI devices. */
static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.resume_early = pci_pm_resume_early,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

/* Power management is not configured: no PM callbacks and no PM ops. */
#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */
  1166. /**
  1167. * __pci_register_driver - register a new pci driver
  1168. * @drv: the driver structure to register
  1169. * @owner: owner module of drv
  1170. * @mod_name: module name string
  1171. *
  1172. * Adds the driver structure to the list of registered drivers.
  1173. * Returns a negative value on error, otherwise 0.
  1174. * If no error occurred, the driver remains registered even if
  1175. * no device was claimed during registration.
  1176. */
  1177. int __pci_register_driver(struct pci_driver *drv, struct module *owner,
  1178. const char *mod_name)
  1179. {
  1180. /* initialize common driver fields */
  1181. drv->driver.name = drv->name;
  1182. drv->driver.bus = &pci_bus_type;
  1183. drv->driver.owner = owner;
  1184. drv->driver.mod_name = mod_name;
  1185. drv->driver.groups = drv->groups;
  1186. drv->driver.dev_groups = drv->dev_groups;
  1187. spin_lock_init(&drv->dynids.lock);
  1188. INIT_LIST_HEAD(&drv->dynids.list);
  1189. /* register with core */
  1190. return driver_register(&drv->driver);
  1191. }
  1192. EXPORT_SYMBOL(__pci_register_driver);
/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	/* Free dynamic IDs only after the driver is fully unregistered. */
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);
/*
 * Placeholder driver returned by pci_dev_driver() for devices that have a
 * busy resource but no bound driver.
 */
static struct pci_driver pci_compat_driver = {
	.name = "compat"
};
  1211. /**
  1212. * pci_dev_driver - get the pci_driver of a device
  1213. * @dev: the device to query
  1214. *
  1215. * Returns the appropriate pci_driver structure or %NULL if there is no
  1216. * registered driver for the device.
  1217. */
  1218. struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
  1219. {
  1220. if (dev->driver)
  1221. return dev->driver;
  1222. else {
  1223. int i;
  1224. for (i = 0; i <= PCI_ROM_RESOURCE; i++)
  1225. if (dev->resource[i].flags & IORESOURCE_BUSY)
  1226. return &pci_compat_driver;
  1227. }
  1228. return NULL;
  1229. }
  1230. EXPORT_SYMBOL(pci_dev_driver);
/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the driver core to check whether a PCI device present in the
 * system is in a driver's list of supported devices.  Returns 1 if a
 * matching pci_device_id entry is found and 0 otherwise.  (This is a
 * bus-type ->match() callback, so it returns a boolean result rather than
 * the matching ID itself.)
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	/* Devices not yet eligible for driver matching never match. */
	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}
  1253. /**
  1254. * pci_dev_get - increments the reference count of the pci device structure
  1255. * @dev: the device being referenced
  1256. *
  1257. * Each live reference to a device should be refcounted.
  1258. *
  1259. * Drivers for PCI devices should normally record such references in
  1260. * their probe() methods, when they bind to a device, and release
  1261. * them by calling pci_dev_put(), in their disconnect() methods.
  1262. *
  1263. * A pointer to the device with the incremented reference counter is returned.
  1264. */
  1265. struct pci_dev *pci_dev_get(struct pci_dev *dev)
  1266. {
  1267. if (dev)
  1268. get_device(&dev->dev);
  1269. return dev;
  1270. }
  1271. EXPORT_SYMBOL(pci_dev_get);
  1272. /**
  1273. * pci_dev_put - release a use of the pci device structure
  1274. * @dev: device that's been disconnected
  1275. *
  1276. * Must be called when a user of a device is finished with it. When the last
  1277. * user of the device calls this function, the memory of the device is freed.
  1278. */
  1279. void pci_dev_put(struct pci_dev *dev)
  1280. {
  1281. if (dev)
  1282. put_device(&dev->dev);
  1283. }
  1284. EXPORT_SYMBOL(pci_dev_put);
/*
 * Bus-type ->uevent() callback: export the device's class, IDs, slot name,
 * and modalias to the uevent environment for userspace (e.g. module
 * autoloading).  Returns -ENOMEM if the environment buffer fills up.
 */
static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}
#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		/* Other recovery results generate no uevent. */
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif
  1341. static int pci_bus_num_vf(struct device *dev)
  1342. {
  1343. return pci_num_vf(to_pci_dev(dev));
  1344. }
/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update PCI devices's DMA configuration using the same
 * info from the OF node or ACPI node of host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct pci_driver *driver = to_pci_driver(dev->driver);
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	/* Prefer the OF description; fall back to the ACPI companion. */
	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);

		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
	}

	pci_put_host_bridge_device(bridge);

	/* Drivers not managing their own DMA claim the default IOMMU domain. */
	if (!ret && !driver->driver_managed_dma) {
		ret = iommu_device_use_default_domain(dev);
		if (ret)
			arch_teardown_dma_ops(dev);
	}

	return ret;
}
  1373. static void pci_dma_cleanup(struct device *dev)
  1374. {
  1375. struct pci_driver *driver = to_pci_driver(dev->driver);
  1376. if (!driver->driver_managed_dma)
  1377. iommu_device_unuse_default_domain(dev);
  1378. }
/* The PCI bus type: binds PCI devices and drivers into the driver core. */
struct bus_type pci_bus_type = {
	.name = "pci",
	.match = pci_bus_match,
	.uevent = pci_uevent,
	.probe = pci_device_probe,
	.remove = pci_device_remove,
	.shutdown = pci_device_shutdown,
	.dev_groups = pci_dev_groups,
	.bus_groups = pci_bus_groups,
	.drv_groups = pci_drv_groups,
	.pm = PCI_PM_OPS_PTR,
	.num_vf = pci_bus_num_vf,
	.dma_configure = pci_dma_configure,
	.dma_cleanup = pci_dma_cleanup,
};
EXPORT_SYMBOL(pci_bus_type);
#ifdef CONFIG_PCIEPORTBUS
/*
 * Match a PCIe port service device with a port service driver: both must
 * be on the pcie_port bus, the service types must agree, and the driver
 * must accept either any port type or the exact type of this port.
 */
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

/* Bus type for PCIe port service devices. */
struct bus_type pcie_port_bus_type = {
	.name = "pci_express",
	.match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif
  1417. static int __init pci_driver_init(void)
  1418. {
  1419. int ret;
  1420. ret = bus_register(&pci_bus_type);
  1421. if (ret)
  1422. return ret;
  1423. #ifdef CONFIG_PCIEPORTBUS
  1424. ret = bus_register(&pcie_port_bus_type);
  1425. if (ret)
  1426. return ret;
  1427. #endif
  1428. dma_debug_add_bus(&pci_bus_type);
  1429. return 0;
  1430. }
  1431. postcore_initcall(pci_driver_init);