// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <[email protected]>
 * Copyright (C) 2004 Tom Long Nguyen <[email protected]>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>

#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available
 * here to PCI-SIG members:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
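
/*
 * Parse the device's _CRS and copy the first memory resource it
 * describes into @res.  Helper for acpi_get_rc_resources() below.
 */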
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
        struct device *dev = &adev->dev;
        struct resource_entry *entry;
        struct list_head list;
        unsigned long flags;
        int ret;

        INIT_LIST_HEAD(&list);
        flags = IORESOURCE_MEM;
        ret = acpi_dev_get_resources(adev, &list,
                                     acpi_dev_filter_resource_type_cb,
                                     (void *) flags);
        if (ret < 0) {
                dev_err(dev, "failed to parse _CRS method, error code %d\n",
                        ret);
                return ret;
        }

        if (ret == 0) {
                dev_err(dev, "no IO and memory resources present in _CRS\n");
                return -EINVAL;
        }

        entry = list_first_entry(&list, struct resource_entry, node);
        *res = *entry->res;
        acpi_dev_free_resource_list(&list);
        return 0;
}
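
/*
 * acpi_get_devices() callback: match the device whose _UID equals the
 * PCI segment number passed via @context and return its handle through
 * @retval.
 */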
static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
                                 void **retval)
{
        u16 *segment = context;
        unsigned long long uid;
        acpi_status status;

        status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
        if (ACPI_FAILURE(status) || uid != *segment)
                return AE_CTRL_DEPTH;

        *(acpi_handle *)retval = handle;
        return AE_CTRL_TERMINATE;
}
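
/**
 * acpi_get_rc_resources - find the memory window of a root complex device
 * @dev: device used for diagnostic messages only
 * @hid: ACPI _HID of the device to search for
 * @segment: PCI segment (domain) number, matched against the device's _UID
 * @res: filled in with the first memory resource from the device's _CRS
 *
 * An ARM64 host controller quirk might call this as, for example
 * (illustrative only):
 *
 *      acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
 */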
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
                          struct resource *res)
{
        struct acpi_device *adev;
        acpi_status status;
        acpi_handle handle;
        int ret;

        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "can't find _HID %s device to locate resources\n",
                        hid);
                return -ENODEV;
        }

        adev = acpi_fetch_acpi_dev(handle);
        if (!adev)
                return -ENODEV;

        ret = acpi_get_rc_addr(adev, res);
        if (ret) {
                dev_err(dev, "can't get resource from %s\n",
                        dev_name(&adev->dev));
                return ret;
        }

        return 0;
}
#endif
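
/**
 * acpi_pci_root_get_mcfg_addr - evaluate _CBA on a PCI host bridge
 * @handle: ACPI handle of the host bridge device
 *
 * Returns the memory-mapped configuration (ECAM) base address reported
 * by the _CBA method, or 0 if the method is absent or fails.
 */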
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
        u32 revision;           /* Not present in _HPP */
        u8  cache_line_size;    /* Not applicable to PCIe */
        u8  latency_timer;      /* Not applicable to PCIe */
        u8  enable_serr;
        u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
        .revision = 1,
        .cache_line_size = 8,
        .latency_timer = 0x40,
        .enable_serr = 0,
        .enable_perr = 0,
};
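
/*
 * Apply a Type 0 setting record to @dev: cache line size, latency timer,
 * and the SERR#/PERR# enables, plus the secondary side of PCI-to-PCI
 * bridges.  Falls back to pci_default_type0 when @hpx is NULL or has an
 * unsupported revision.
 */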
static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
        u16 pci_cmd, pci_bctl;

        if (!hpx)
                hpx = &pci_default_type0;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
                         hpx->revision);
                hpx = &pci_default_type0;
        }

        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
        pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
        if (hpx->enable_serr)
                pci_cmd |= PCI_COMMAND_SERR;
        if (hpx->enable_perr)
                pci_cmd |= PCI_COMMAND_PARITY;
        pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

        /* Program bridge control value */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
                pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
                                      hpx->latency_timer);
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
                if (hpx->enable_perr)
                        pci_bctl |= PCI_BRIDGE_CTL_PARITY;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
        }
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hpx_type0 *hpx0)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx0->revision = revision;
                hpx0->cache_line_size = fields[2].integer.value;
                hpx0->latency_timer = fields[3].integer.value;
                hpx0->enable_serr = fields[4].integer.value;
                hpx0->enable_perr = fields[5].integer.value;
                break;
        default:
                pr_warn("%s: Type 0 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
        u32 revision;
        u8  max_mem_read;
        u8  avg_max_split;
        u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
        int pos;

        if (!hpx)
                return;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return;

        pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hpx_type1 *hpx1)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx1->revision = revision;
                hpx1->max_mem_read = fields[2].integer.value;
                hpx1->avg_max_split = fields[3].integer.value;
                hpx1->tot_max_split = fields[4].integer.value;
                break;
        default:
                pr_warn("%s: Type 1 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
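
/*
 * Check whether the Root Port above @dev has its Read Completion
 * Boundary (RCB) bit set in the Link Control Register, i.e. whether it
 * advertises a 128-byte RCB.
 */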
static bool pcie_root_rcb_set(struct pci_dev *dev)
{
        struct pci_dev *rp = pcie_find_root_port(dev);
        u16 lnkctl;

        if (!rp)
                return false;

        pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
        if (lnkctl & PCI_EXP_LNKCTL_RCB)
                return true;

        return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
        u32 revision;
        u32 unc_err_mask_and;
        u32 unc_err_mask_or;
        u32 unc_err_sever_and;
        u32 unc_err_sever_or;
        u32 cor_err_mask_and;
        u32 cor_err_mask_or;
        u32 adv_err_cap_and;
        u32 adv_err_cap_or;
        u16 pci_exp_devctl_and;
        u16 pci_exp_devctl_or;
        u16 pci_exp_lnkctl_and;
        u16 pci_exp_lnkctl_or;
        u32 sec_unc_err_sever_and;
        u32 sec_unc_err_sever_or;
        u32 sec_unc_err_mask_and;
        u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
        int pos;
        u32 reg32;

        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCIe settings rev %d not supported\n",
                         hpx->revision);
                return;
        }

        /*
         * Don't allow _HPX to change MPS or MRRS settings.  We manage
         * those to make sure they're consistent with the rest of the
         * platform.
         */
        hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
                                   PCI_EXP_DEVCTL_READRQ;
        hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
                                    PCI_EXP_DEVCTL_READRQ);

        /* Initialize Device Control Register */
        pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                        ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

        /* Initialize Link Control Register */
        if (pcie_cap_has_lnkctl(dev)) {
                /*
                 * If the Root Port supports Read Completion Boundary of
                 * 128, set RCB to 128.  Otherwise, clear it.
                 */
                hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
                hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
                if (pcie_root_rcb_set(dev))
                        hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

                pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                        ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
        }

        /* Find Advanced Error Reporting Enhanced Capability */
        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return;

        /* Initialize Uncorrectable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
        reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

        /* Initialize Uncorrectable Error Severity Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
        reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

        /* Initialize Correctable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
        reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

        /* Initialize Advanced Error Capabilities and Control Register */
        pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
        reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

        /* Don't enable ECRC generation or checking if unsupported */
        if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
                reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
        if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
                reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
        pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

        /*
         * FIXME: The following two registers are not supported yet.
         *
         *   o Secondary Uncorrectable Error Severity Register
         *   o Secondary Uncorrectable Error Mask Register
         */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hpx_type2 *hpx2)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx2->revision = revision;
                hpx2->unc_err_mask_and = fields[2].integer.value;
                hpx2->unc_err_mask_or = fields[3].integer.value;
                hpx2->unc_err_sever_and = fields[4].integer.value;
                hpx2->unc_err_sever_or = fields[5].integer.value;
                hpx2->cor_err_mask_and = fields[6].integer.value;
                hpx2->cor_err_mask_or = fields[7].integer.value;
                hpx2->adv_err_cap_and = fields[8].integer.value;
                hpx2->adv_err_cap_or = fields[9].integer.value;
                hpx2->pci_exp_devctl_and = fields[10].integer.value;
                hpx2->pci_exp_devctl_or = fields[11].integer.value;
                hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
                hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
                hpx2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx2->sec_unc_err_sever_or = fields[15].integer.value;
                hpx2->sec_unc_err_mask_and = fields[16].integer.value;
                hpx2->sec_unc_err_mask_or = fields[17].integer.value;
                break;
        default:
                pr_warn("%s: Type 2 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
        u16 device_type;
        u16 function_type;
        u16 config_space_location;
        u16 pci_exp_cap_id;
        u16 pci_exp_cap_ver;
        u16 pci_exp_vendor_id;
        u16 dvsec_id;
        u16 dvsec_rev;
        u16 match_offset;
        u32 match_mask_and;
        u32 match_value;
        u16 reg_offset;
        u32 reg_mask_and;
        u32 reg_mask_or;
};

enum hpx_type3_dev_type {
        HPX_TYPE_ENDPOINT       = BIT(0),
        HPX_TYPE_LEG_END        = BIT(1),
        HPX_TYPE_RC_END         = BIT(2),
        HPX_TYPE_RC_EC          = BIT(3),
        HPX_TYPE_ROOT_PORT      = BIT(4),
        HPX_TYPE_UPSTREAM       = BIT(5),
        HPX_TYPE_DOWNSTREAM     = BIT(6),
        HPX_TYPE_PCI_BRIDGE     = BIT(7),
        HPX_TYPE_PCIE_BRIDGE    = BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
        u16 pcie_type = pci_pcie_type(dev);
        static const int pcie_to_hpx3_type[] = {
                [PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
                [PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
                [PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
                [PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
                [PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
                [PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
                [PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
                [PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
                [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
        };

        if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
                return 0;

        return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
        HPX_FN_NORMAL           = BIT(0),
        HPX_FN_SRIOV_PHYS       = BIT(1),
        HPX_FN_SRIOV_VIRT       = BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
        if (dev->is_virtfn)
                return HPX_FN_SRIOV_VIRT;
        else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
                return HPX_FN_SRIOV_PHYS;
        else
                return HPX_FN_NORMAL;
}
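
/*
 * Match the capability version found in config space (@pcie_cap_id)
 * against the version field of a Type 3 record (@hpx3_cap_id): if BIT(4)
 * of the record's value is set, any version >= its low nibble matches;
 * otherwise the versions must be equal.
 */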
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
        u8 cap_ver = hpx3_cap_id & 0xf;

        if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
                return true;
        else if (cap_ver == pcie_cap_id)
                return true;

        return false;
}

enum hpx_type3_cfg_loc {
        HPX_CFG_PCICFG          = 0,
        HPX_CFG_PCIE_CAP        = 1,
        HPX_CFG_PCIE_CAP_EXT    = 2,
        HPX_CFG_VEND_CAP        = 3,
        HPX_CFG_DVSEC           = 4,
        HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
                                       const struct hpx_type3 *reg)
{
        u32 match_reg, write_reg, header, orig_value;
        u16 pos;

        if (!(hpx3_device_type(dev) & reg->device_type))
                return;

        if (!(hpx3_function_type(dev) & reg->function_type))
                return;

        switch (reg->config_space_location) {
        case HPX_CFG_PCICFG:
                pos = 0;
                break;
        case HPX_CFG_PCIE_CAP:
                pos = pci_find_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;
                break;
        case HPX_CFG_PCIE_CAP_EXT:
                pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;

                pci_read_config_dword(dev, pos, &header);
                if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
                                          reg->pci_exp_cap_ver))
                        return;
                break;
        case HPX_CFG_VEND_CAP:
        case HPX_CFG_DVSEC:
        default:
                pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
                return;
        }

        pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
        if ((match_reg & reg->match_mask_and) != reg->match_value)
                return;

        pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
        orig_value = write_reg;
        write_reg &= reg->reg_mask_and;
        write_reg |= reg->reg_mask_or;

        if (orig_value == write_reg)
                return;

        pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

        pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
                pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
                                union acpi_object *reg_fields)
{
        hpx3_reg->device_type = reg_fields[0].integer.value;
        hpx3_reg->function_type = reg_fields[1].integer.value;
        hpx3_reg->config_space_location = reg_fields[2].integer.value;
        hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
        hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
        hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
        hpx3_reg->dvsec_id = reg_fields[6].integer.value;
        hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
        hpx3_reg->match_offset = reg_fields[8].integer.value;
        hpx3_reg->match_mask_and = reg_fields[9].integer.value;
        hpx3_reg->match_value = reg_fields[10].integer.value;
        hpx3_reg->reg_offset = reg_fields[11].integer.value;
        hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
        hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
                                            union acpi_object *record)
{
        union acpi_object *fields = record->package.elements;
        u32 desc_count, expected_length, revision;
        union acpi_object *reg_fields;
        struct hpx_type3 hpx3;
        int i;

        revision = fields[1].integer.value;
        switch (revision) {
        case 1:
                desc_count = fields[2].integer.value;
                expected_length = 3 + desc_count * 14;
                if (record->package.count != expected_length)
                        return AE_ERROR;

                for (i = 2; i < expected_length; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;

                for (i = 0; i < desc_count; i++) {
                        reg_fields = fields + 3 + i * 14;
                        parse_hpx3_register(&hpx3, reg_fields);
                        program_hpx_type3(dev, &hpx3);
                }
                break;
        default:
                pr_warn("%s: Type 3 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
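
/*
 * Evaluate _HPX for the bridge at @handle and apply every setting record
 * in the returned package to @dev, dispatching on the record type
 * (0: PCI, 1: PCI-X, 2: PCIe, 3: PCIe register match/mask writes).
 *
 * _HPX returns a package of packages, each beginning with an integer
 * type and revision.  For instance, this illustrative ASL (not from any
 * particular platform) yields one Type 0 Revision 1 record:
 *
 *      Method (_HPX, 0) {
 *              Return (Package () {
 *                      Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *              })
 *      }
 */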
static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        struct hpx_type0 hpx0;
        struct hpx_type1 hpx1;
        struct hpx_type2 hpx2;
        u32 type;
        int i;

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        memset(&hpx0, 0, sizeof(hpx0));
                        status = decode_type0_hpx_record(record, &hpx0);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type0(dev, &hpx0);
                        break;
                case 1:
                        memset(&hpx1, 0, sizeof(hpx1));
                        status = decode_type1_hpx_record(record, &hpx1);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type1(dev, &hpx1);
                        break;
                case 2:
                        memset(&hpx2, 0, sizeof(hpx2));
                        status = decode_type2_hpx_record(record, &hpx2);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type2(dev, &hpx2);
                        break;
                case 3:
                        status = program_type3_hpx_record(dev, record);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        pr_err("%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
exit:
        kfree(buffer.pointer);
        return status;
}
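
/*
 * Evaluate the legacy _HPP method, which returns a package of four
 * integers equivalent to a Type 0 _HPX record, and apply the settings
 * to @dev.
 */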
static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        struct hpx_type0 hpx0;
        int i;

        memset(&hpx0, 0, sizeof(hpx0));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpx0.revision = 1;
        hpx0.cache_line_size = fields[0].integer.value;
        hpx0.latency_timer = fields[1].integer.value;
        hpx0.enable_serr = fields[2].integer.value;
        hpx0.enable_perr = fields[3].integer.value;

        program_hpx_type0(dev, &hpx0);
exit:
        kfree(buffer.pointer);
        return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 *
 * Walk up from @dev's bridge toward the root looking for an _HPX or
 * _HPP method, and program the settings it returns into @dev.
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered.  If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
        const struct pci_host_bridge *host;
        u32 slot_cap;

        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
                return false;

        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
                return false;

        if (pcie_ports_native)
                return true;

        host = pci_find_host_bridge(bridge->bus);
        return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(context, struct acpi_device, wakeup.context);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
        struct pci_dev *pci_dev;

        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_request_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *         choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *         choose highest power _SxD or any lower power
 */
pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold || !pdev->d3cold_allowed)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
        if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
        acpi_handle handle = ACPI_HANDLE(&dev->dev);

        if (!handle || !acpi_has_method(handle, "_RST"))
                return -ENOTTY;

        if (probe)
                return 0;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
                pci_warn(dev, "ACPI _RST failed\n");
                return -ENOTTY;
        }

        return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
        struct pci_dev *rpdev;
        struct acpi_device *adev, *rpadev;
        const union acpi_object *obj;

        if (acpi_pci_disabled || !dev->is_hotplug_bridge)
                return false;

        adev = ACPI_COMPANION(&dev->dev);
        if (adev) {
                /*
                 * If the bridge has _S0W, whether or not it can go into D3
                 * depends on what is returned by that object.  In particular,
                 * if the power state returned by _S0W is D2 or shallower,
                 * entering D3 should not be allowed.
                 */
                if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
                        return false;

                /*
                 * Otherwise, assume that the bridge can enter D3 so long as
                 * it is power-manageable via ACPI.
                 */
                if (acpi_device_power_manageable(adev))
                        return true;
        }

        rpdev = pcie_find_root_port(dev);
        if (!rpdev)
                return false;

        if (rpdev == dev)
                rpadev = adev;
        else
                rpadev = ACPI_COMPANION(&rpdev->dev);

        if (!rpadev)
                return false;

        /*
         * If the Root Port cannot signal wakeup at all, i.e., it doesn't
         * supply a wakeup GPE via _PRW, it cannot signal hotplug events
         * from low-power states including D3hot and D3cold.
         */
        if (!rpadev->wakeup.flags.valid)
                return false;

        /*
         * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root
         * Port to verify whether or not it can signal wakeup from D3.
         */
        if (rpadev != adev &&
            acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
                return false;

        /*
         * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
         * the Port can signal hotplug events while in D3.  We assume any
         * bridges *below* that Root Port can also signal hotplug events
         * while in D3.
         */
        if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
                                   ACPI_TYPE_INTEGER, &obj) &&
            obj->integer.value == 1)
                return true;

        return false;
}

static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
        int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
        int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
                                    ACPI_ADR_SPACE_PCI_CONFIG, val);
        if (ret)
                pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
                        enable ? "connect" : "disconnect", ret);
}
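
/*
 * Put @dev into the ACPI power state corresponding to @state via its
 * ACPI companion, notifying AML of PCI config space (un)availability
 * around D3cold transitions.
 */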
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0]     = ACPI_STATE_D0,
                [PCI_D1]     = ACPI_STATE_D1,
                [PCI_D2]     = ACPI_STATE_D2,
                [PCI_D3hot]  = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
        case PCI_D3cold:
                break;
        default:
                return -EINVAL;
        }

        if (state == PCI_D3cold) {
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                    PM_QOS_FLAGS_ALL)
                        return -EBUSY;

                /* Notify AML lack of PCI config space availability */
                acpi_pci_config_space_access(dev, false);
        }

        error = acpi_device_set_power(adev, state_conv[state]);
        if (error)
                return error;

        pci_dbg(dev, "power state changed by ACPI to %s\n",
                acpi_power_state_string(adev->power.state));

        /*
         * Notify AML of PCI config space availability.  Config space is
         * accessible in all states except D3cold; the only transitions
         * that change availability are transitions to D3cold and from
         * D3cold to D0.
         */
        if (state == PCI_D0)
                acpi_pci_config_space_access(dev, true);

        return 0;
}
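
/*
 * Return the current power state of @dev as reported by its ACPI
 * companion, or PCI_UNKNOWN if the device is not power-manageable via
 * ACPI.
 */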
pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const pci_power_t state_conv[] = {
                [ACPI_STATE_D0]      = PCI_D0,
                [ACPI_STATE_D1]      = PCI_D1,
                [ACPI_STATE_D2]      = PCI_D2,
                [ACPI_STATE_D3_HOT]  = PCI_D3hot,
                [ACPI_STATE_D3_COLD] = PCI_D3cold,
        };
        int state;

        if (!adev || !acpi_device_power_manageable(adev))
                return PCI_UNKNOWN;

        state = adev->power.state;
        if (state == ACPI_STATE_UNKNOWN)
                return PCI_UNKNOWN;

        return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        if (adev && acpi_device_power_manageable(adev))
                acpi_device_update_power(adev, NULL);
}
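
/*
 * Walk up the bus hierarchy from @bus until a bridge capable of ACPI
 * wakeup signaling is found and configure it for @enable, falling back
 * to the root bridge if no intermediate bridge can do it.
 */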
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (acpi_pm_device_can_wakeup(&bus->self->dev))
                        return acpi_pm_set_device_wakeup(&bus->self->dev,
                                                         enable);

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge) {
                if (acpi_pm_device_can_wakeup(bus->bridge))
                        return acpi_pm_set_device_wakeup(bus->bridge, enable);
        }
        return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
        if (acpi_pci_disabled)
                return 0;

        if (acpi_pm_device_can_wakeup(&dev->dev))
                return acpi_pm_set_device_wakeup(&dev->dev, enable);

        return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev;

        if (acpi_pci_disabled)
                return false;

        /*
         * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
         * over system-wide suspend/resume confuses the platform firmware,
         * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
         * endpoint devices are expected to be in D3 before invoking the S3
         * entry path from the firmware, so they should not be affected by
         * this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        adev = ACPI_COMPANION(&dev->dev);
        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (adev->wakeup.flags.valid &&
            device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                DSM_PCI_POWER_ON_RESET_DELAY, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer or NULL.
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
        int ret;

        if (!func)
                return -EINVAL;

        down_write(&pci_acpi_companion_lookup_sem);

        if (pci_acpi_find_companion_hook) {
                ret = -EBUSY;
        } else {
                pci_acpi_find_companion_hook = func;
                ret = 0;
        }

        up_write(&pci_acpi_companion_lookup_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
        down_write(&pci_acpi_companion_lookup_sem);

        pci_acpi_find_companion_hook = NULL;

        up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);

static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev;
        bool check_children;
        u64 addr;

        if (!dev->parent)
                return NULL;

        down_read(&pci_acpi_companion_lookup_sem);

        adev = pci_acpi_find_companion_hook ?
                pci_acpi_find_companion_hook(pci_dev) : NULL;

        up_read(&pci_acpi_companion_lookup_sem);

        if (adev)
                return adev;

        check_children = pci_is_bridge(pci_dev);
        /* Refer to the ACPI spec for the syntax of _ADR */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);

        /*
         * There may be ACPI device objects in the ACPI namespace that are
         * children of the device object representing the host bridge, but
         * don't represent PCI devices.  Both _HID and _ADR may be present
         * for them, even though that is against the specification (for
         * example, see Section 6.1 of ACPI 6.3), but in many cases the _ADR
         * returns 0 which appears to indicate that they should not be taken
         * into consideration as potential companions of PCI devices on the
         * root bus.
         *
         * To catch this special case, disregard the returned device object
         * if it has a valid _HID, addr is 0 and the PCI device at hand is
         * on the root bus.
         */
        if (adev && adev->pnp.type.platform_id && !addr &&
            pci_is_root_bus(pci_dev->bus))
                return NULL;

        return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3HOT_WAIT)
                                pdev->d3hot_delay = value;
                }
        }
        ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
        u8 val;

        if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
                return;
        if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
                return;

        /*
         * These root ports expose PCIe (including DMA) outside of the
         * system.  Everything downstream from them is external.
         */
        if (val)
                dev->external_facing = 1;
}

void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);

        pci_acpi_optimize_delay(pci_dev, adev->handle);
        pci_acpi_set_external_facing(pci_dev);
        pci_acpi_add_edr_notifier(pci_dev);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        /*
         * For bridges that can do D3 we enable wake automatically (as
         * we do for the power management itself in that case).  The
         * reason is that the bridge may have additional methods such as
         * _DSW that need to be called.
         */
        if (pci_dev->bridge_d3)
                device_wakeup_enable(dev);

        acpi_pci_wakeup(pci_dev, false);
        acpi_device_power_add_dependent(adev, dev);

        if (pci_is_bridge(pci_dev))
                acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);

        pci_acpi_remove_edr_notifier(pci_dev);
        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                acpi_device_power_remove_dependent(adev, dev);
                if (pci_dev->bridge_d3)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by an irqchip driver, as the parent of the MSI
 * domain, to provide the callback interface used to query the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

        if (acpi_pci_disabled)
                return 0;

        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);