aspm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Enable PCIe link L0s/L1 state and Clock Power Management
 *
 * Copyright (C) 2007 Intel
 * Copyright (C) Zhang Yanmin ([email protected])
 * Copyright (C) Shaohua Li ([email protected])
 */

#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "../pci.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."

/* Note: those are not register definitions */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)

struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */
};

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT		0	/* BIOS default setting */
#define POLICY_PERFORMANCE	1	/* high performance */
#define POLICY_POWERSAVE	2	/* high power saving */
#define POLICY_POWER_SUPERSAVE	3	/* possibly even more power saving */

#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

#define LINK_RETRAIN_TIMEOUT HZ
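/* HZ jiffies == 1 second: pcie_wait_for_retrain() below polls the Link Training bit until it clears or this timeout expires */
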
/*
 * The L1 PM substate capability is only implemented in function 0 in a
 * multi function device.
 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
	struct pci_dev *child;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		if (PCI_FUNC(child->devfn) == 0)
			return child;

	return NULL;
}

static int policy_to_aspm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
		/* Enable ASPM L0s/L1 */
		return (ASPM_STATE_L0S | ASPM_STATE_L1);
	case POLICY_POWER_SUPERSAVE:
		/* Enable Everything */
		return ASPM_STATE_ALL;
	case POLICY_DEFAULT:
		return link->aspm_default;
	}
	return 0;
}

static int policy_to_clkpm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
	case POLICY_POWER_SUPERSAVE:
		/* Enable Clock PM */
		return 1;
	case POLICY_DEFAULT:
		return link->clkpm_default;
	}
	return 0;
}

static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
	link->clkpm_enabled = !!enable;
}

static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
	/*
	 * Don't enable Clock PM if the link is not Clock PM capable
	 * or Clock PM is disabled
	 */
	if (!link->clkpm_capable || link->clkpm_disable)
		enable = 0;
	/* Nothing to do if the requested state equals the current state */
	if (link->clkpm_enabled == enable)
		return;
	pcie_set_clkpm_nocheck(link, enable);
}

static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}

static int pcie_wait_for_retrain(struct pci_dev *pdev)
{
	unsigned long end_jiffies;
	u16 reg16;

	/* Wait for Link Training to be cleared by hardware */
	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
	do {
		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			return 0;
		msleep(1);
	} while (time_before(jiffies, end_jiffies));

	return -ETIMEDOUT;
}

static int pcie_retrain_link(struct pcie_link_state *link)
{
	struct pci_dev *parent = link->pdev;
	int rc;
	u16 reg16;

	/*
	 * Ensure the updated LNKCTL parameters are used during link
	 * training by checking that there is no ongoing link training to
	 * avoid LTSSM race as recommended in Implementation Note at the
	 * end of PCIe r6.0.1 sec 7.5.3.7.
	 */
	rc = pcie_wait_for_retrain(parent);
	if (rc)
		return rc;

	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	reg16 |= PCI_EXP_LNKCTL_RL;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	if (parent->clear_retrain_link) {
		/*
		 * Due to an erratum in some devices the Retrain Link bit
		 * needs to be cleared again manually to allow the link
		 * training to succeed.
		 */
		reg16 &= ~PCI_EXP_LNKCTL_RL;
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	}

	return pcie_wait_for_retrain(parent);
}

/*
 * pcie_aspm_configure_common_clock: check whether the two ends of a link
 * can use a common clock. If they can, configure them to use the common
 * clock. That will reduce the ASPM state exit latency.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
	}

	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, ccc);
	}

	/* Configure upstream component */
	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CCC, ccc);

	if (pcie_retrain_link(link)) {
		/* Training failed. Restore common clock configurations */
		pci_err(parent, "ASPM: Could not configure common clock\n");
		list_for_each_entry(child, &linkbus->devices, bus_list)
			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
							   PCI_EXP_LNKCTL_CCC,
							   child_old_ccc[PCI_FUNC(child->devfn)]);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
	}
}

/* Convert L0s latency encoding to ns */
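/* Encodings 0-6 give 64 ns << encoding (64 ns up to 4096 ns); 7 means "greater than 4 us" and is treated as 5 us below */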
static u32 calc_l0s_latency(u32 lnkcap)
{
	u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;

	if (encoding == 0x7)
		return (5 * 1000);	/* > 4us */
	return (64 << encoding);
}

/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (64 << encoding);
}

/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
	u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;

	if (encoding == 0x7)
		return (65 * 1000);	/* > 64us */
	return (1000 << encoding);
}

/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return -1U;
	return (1000 << encoding);
}

/* Convert L1SS T_pwr encoding to usec */
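/* e.g. scale = 2 selects units of 100 us, so val = 10 encodes a T_POWER_ON of 1000 us */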
static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	}
	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
	return 0;
}

/*
 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
 * register. Ports enter L1.2 when the most recent LTR value is greater
 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
 * don't enter L1.2 too aggressively.
 *
 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
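 *
 * For example (illustrative numbers): threshold_us = 55 gives
 * threshold_ns = 55000, which lands in the "times 1024 ns" bucket, so
 * scale = 2 and value = roundup(55000, 1024) / 1024 = 54
 * (54 * 1024 ns = 55296 ns >= 55000 ns).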
 */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u64 threshold_ns = (u64) threshold_us * 1000;

	/*
	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
	 * value of 0x3ff.
	 */
	if (threshold_ns <= 0x3ff * 1) {
		*scale = 0;		/* Value times 1ns */
		*value = threshold_ns;
	} else if (threshold_ns <= 0x3ff * 32) {
		*scale = 1;		/* Value times 32ns */
		*value = roundup(threshold_ns, 32) / 32;
	} else if (threshold_ns <= 0x3ff * 1024) {
		*scale = 2;		/* Value times 1024ns */
		*value = roundup(threshold_ns, 1024) / 1024;
	} else if (threshold_ns <= 0x3ff * 32768) {
		*scale = 3;		/* Value times 32768ns */
		*value = roundup(threshold_ns, 32768) / 32768;
	} else if (threshold_ns <= 0x3ff * 1048576) {
		*scale = 4;		/* Value times 1048576ns */
		*value = roundup(threshold_ns, 1048576) / 1048576;
	} else if (threshold_ns <= 0x3ff * (u64) 33554432) {
		*scale = 5;		/* Value times 33554432ns */
		*value = roundup(threshold_ns, 33554432) / 33554432;
	} else {
		*scale = 5;
		*value = 0x3ff;		/* Max representable value */
	}
}

static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, encoding, lnkcap_up, lnkcap_dw;
	u32 l1_switch_latency = 0, latency_up_l0s;
	u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
	u32 acceptable_l0s, acceptable_l1;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;

	/* Calculate endpoint L0s acceptable latency */
	encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	acceptable_l0s = calc_l0s_acceptable(encoding);

	/* Calculate endpoint L1 acceptable latency */
	encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
	acceptable_l1 = calc_l1_acceptable(encoding);

	while (link) {
		struct pci_dev *dev = pci_function_0(link->pdev->subordinate);

		/* Read direction exit latencies */
		pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
					   &lnkcap_up);
		pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
					   &lnkcap_dw);
		latency_up_l0s = calc_l0s_latency(lnkcap_up);
		latency_up_l1 = calc_l1_latency(lnkcap_up);
		latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
		latency_dw_l1 = calc_l1_latency(lnkcap_dw);

		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (latency_up_l0s > acceptable_l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (latency_dw_l0s > acceptable_l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;

		/*
		 * Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1. The spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
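		 *
		 * For example, for an endpoint behind two switches the
		 * link closest to the endpoint is checked with no extra
		 * switch latency, the next one with +1 us, and the Root
		 * Port link with +2 us, matching the l1_switch_latency
		 * accumulation below.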
		 */
		latency = max_t(u32, latency_up_l1, latency_dw_l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable_l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		l1_switch_latency += 1000;

		link = link->parent;
	}
}

static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
				    u32 clear, u32 set)
{
	u32 val;

	pci_read_config_dword(pdev, pos, &val);
	val &= ~clear;
	val |= set;
	pci_write_config_dword(pdev, pos, val);
}

/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
	u32 ctl1 = 0, ctl2 = 0;
	u32 pctl1, pctl2, cctl1, cctl2;
	u32 pl1_2_enables, cl1_2_enables;

	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	if (calc_l1ss_pwron(parent, scale1, val1) >
	    calc_l1ss_pwron(child, scale2, val2)) {
		ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(parent, scale1, val1);
	} else {
		ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(child, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
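	 *
	 * For example, with t_common_mode = 10 us and t_power_on = 40 us
	 * the threshold below works out to 2 + 4 + 10 + 40 = 56 us.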
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;

	/* Some broken devices only support dword access to L1 SS */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);

	if (ctl1 == pctl1 && ctl1 == cctl1 &&
	    ctl2 == pctl2 && ctl2 == cctl2)
		return;

	/* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
	}

	/* Program T_POWER_ON times in both ports */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);

	/* Program Common_Mode_Restore_Time in upstream device */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);

	/* Program LTR_L1.2_THRESHOLD time in both ports */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
					pl1_2_enables);
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
					cl1_2_enables);
	}
}

static void aspm_l1ss_init(struct pcie_link_state *link)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_l1ss_cap, child_l1ss_cap;
	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;

	if (!parent->l1ss || !child->l1ss)
		return;

	/* Setup L1 substate */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
			      &parent_l1ss_cap);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
			      &child_l1ss_cap);

	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		parent_l1ss_cap = 0;
	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		child_l1ss_cap = 0;

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!child->ltr_path)
		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	if (parent_l1ss_cap)
		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				      &parent_l1ss_ctl1);
	if (child_l1ss_cap)
		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				      &child_l1ss_ctl1);

	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
}

static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_lnkcap, child_lnkcap;
	u16 parent_lnkctl, child_lnkctl;
	struct pci_bus *linkbus = parent->subordinate;

	if (blacklist) {
		/* Set the enabled/disable fields so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state after
	 * clock configuration. L0s & L1 exit latencies in the otherwise
	 * read-only Link Capabilities may change depending on common clock
	 * configuration (PCIe r5.0, sec 7.5.3.6).
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
		link->aspm_support |= ASPM_STATE_L0S;

	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;

	/* Setup L1 state */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
		link->aspm_support |= ASPM_STATE_L1;

	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
		link->aspm_enabled |= ASPM_STATE_L1;

	aspm_l1ss_init(link);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_aspm_check_latency(child);
	}
}

/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;

	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable bit at parent first, then at child
	 * - When disabling L1.x, disable bit at child first, then at parent
	 * - When enabling ASPM L1.x, need to disable L1
	 *   (at child followed by parent).
	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1, and it gets enabled later
	 * in pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}

static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}

static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}
	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured with the
	 * same setting for ASPM. Enabling ASPM L1 should be done in
	 * the upstream component first and then the downstream, and vice
	 * versa for disabling ASPM L1. The spec doesn't mention L0S.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}

static void pcie_config_aspm_path(struct pcie_link_state *link)
{
	while (link) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		link = link->parent;
	}
}

static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}

static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not be PCIe functions, which is
	 * very strange. Disable ASPM for the whole slot.
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */
		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe devices; we follow Microsoft
		 * in using the RBER bit to determine whether a function is a
		 * 1.1-version device.
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}

static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies. Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}

static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
{
	struct pci_dev *child;

	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
}

/*
 * pcie_aspm_init_link_state: Initialize PCI Express link state.
 * Called after the PCIe device and its child devices have been scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device is a
	 * downstream port.
	 */
	if (!pcie_downstream_port(pdev))
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state. Note that we also need to configure
	 * upstream links because their capable state can be updated
	 * through pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting. Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now. If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

	pcie_aspm_update_sysfs_visibility(pdev);

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}

/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;

	BUG_ON(root->parent);

	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;

		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}

/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/*
	 * link->downstream is a pointer to the pci_dev of function 0. If
	 * we remove that function, the pci_dev is about to be deallocated,
	 * so we can't use link->downstream again. Free the link state to
	 * avoid this.
	 *
	 * If we're removing a non-0 function, it's possible we could
	 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
	 * programming the same ASPM Control value for all functions of
	 * multi-function devices, so disable ASPM for all of them.
	 */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	if (!pci_is_pcie(pdev))
		return NULL;

	bridge = pci_upstream_bridge(pdev);
	if (!bridge || !pci_is_pcie(bridge))
		return NULL;

	return bridge->link_state;
}

static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request. Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		/* L1 PM substates require L1 */
		link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
	if (state & PCIE_LINK_STATE_L1_1)
		link->aspm_disable |= ASPM_STATE_L1_1;
	if (state & PCIE_LINK_STATE_L1_2)
		link->aspm_disable |= ASPM_STATE_L1_2;
	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
		link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
		link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);

	return 0;
}

int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);

/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states. Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register. Returns 0 or a negative errno.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);

static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}

static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
		if (i == aspm_policy)
			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
		else
			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
	cnt += sprintf(buffer + cnt, "\n");
	return cnt;
}

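/*
 * With MODULE_PARAM_PREFIX "pcie_aspm." this parameter is exposed as
 * /sys/module/pcie_aspm/parameters/policy; writing one of the policy_str
 * values ("default", "performance", "powersave", "powersupersave")
 * switches the policy at runtime, e.g.:
 *
 *   echo powersave > /sys/module/pcie_aspm/parameters/policy
 */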
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);

/**
 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
 * @pdev: Target device.
 *
 * Relies on the upstream bridge's link_state being valid. The link_state
 * is deallocated only when the last child of the bridge (i.e., @pdev or a
 * sibling) is removed, and the caller should be holding a reference to
 * @pdev, so this should be safe.
 */
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return false;

	return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);

static ssize_t aspm_attr_show_common(struct device *dev,
				     struct device_attribute *attr,
				     char *buf, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}

static ssize_t aspm_attr_store_common(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	if (state_enable) {
		link->aspm_disable &= ~state;
		/* need to enable L1 for substates */
		if (state & ASPM_STATE_L1SS)
			link->aspm_disable &= ~ASPM_STATE_L1;
	} else {
		link->aspm_disable |= state;
		if (state & ASPM_STATE_L1)
			link->aspm_disable |= ASPM_STATE_L1SS;
	}

	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

#define ASPM_ATTR(_f, _s)						\
static ssize_t _f##_show(struct device *dev,				\
			 struct device_attribute *attr, char *buf)	\
{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); }	\
									\
static ssize_t _f##_store(struct device *dev,				\
			  struct device_attribute *attr,		\
			  const char *buf, size_t len)			\
{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }

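/* e.g. ASPM_ATTR(l0s_aspm, L0S) defines l0s_aspm_show()/l0s_aspm_store() operating on ASPM_STATE_L0S */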
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
ASPM_ATTR(l1_1_aspm, L1_1)
ASPM_ATTR(l1_2_aspm, L1_2)
ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)

static ssize_t clkpm_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
}

static ssize_t clkpm_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link->clkpm_disable = !state_enable;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

static DEVICE_ATTR_RW(clkpm);
static DEVICE_ATTR_RW(l0s_aspm);
static DEVICE_ATTR_RW(l1_aspm);
static DEVICE_ATTR_RW(l1_1_aspm);
static DEVICE_ATTR_RW(l1_2_aspm);
static DEVICE_ATTR_RW(l1_1_pcipm);
static DEVICE_ATTR_RW(l1_2_pcipm);

static struct attribute *aspm_ctrl_attrs[] = {
	&dev_attr_clkpm.attr,
	&dev_attr_l0s_aspm.attr,
	&dev_attr_l1_aspm.attr,
	&dev_attr_l1_1_aspm.attr,
	&dev_attr_l1_2_aspm.attr,
	&dev_attr_l1_1_pcipm.attr,
	&dev_attr_l1_2_pcipm.attr,
	NULL
};

static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	static const u8 aspm_state_map[] = {
		ASPM_STATE_L0S,
		ASPM_STATE_L1,
		ASPM_STATE_L1_1,
		ASPM_STATE_L1_2,
		ASPM_STATE_L1_1_PCIPM,
		ASPM_STATE_L1_2_PCIPM,
	};

	if (aspm_disabled || !link)
		return 0;

	if (n == 0)
		return link->clkpm_capable ? a->mode : 0;

	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}

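/*
 * These attributes appear under each device's "link" directory in sysfs,
 * e.g. /sys/bus/pci/devices/<domain:bus:dev.fn>/link/l1_aspm, and are only
 * visible for the states the link is actually capable of.
 */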
const struct attribute_group aspm_ctrl_attr_group = {
	.name = "link",
	.attrs = aspm_ctrl_attrs,
	.is_visible = aspm_ctrl_attrs_are_visible,
};

static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		printk(KERN_INFO "PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}

__setup("pcie_aspm=", pcie_aspm_disable);

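/*
 * Kernel command-line usage: "pcie_aspm=off" disables ASPM support entirely;
 * "pcie_aspm=force" enables ASPM even on devices that predate PCIe 1.1.
 */
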
void pcie_no_aspm(void)
{
	/*
	 * Disabling ASPM is intended to prevent the kernel from modifying
	 * existing hardware state, not to clear existing state. To that end:
	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
	 * (b) prevent userspace from changing policy
	 */
	if (!aspm_force) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
	}
}

bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}