intel_idle.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * intel_idle.c - native hardware idle loop for modern Intel processors
  4. *
  5. * Copyright (c) 2013 - 2020, Intel Corporation.
  6. * Len Brown <[email protected]>
  7. * Rafael J. Wysocki <[email protected]>
  8. */
  9. /*
  10. * intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
  11. * in lieu of the legacy ACPI processor_idle driver. The intent is to
  12. * make Linux more efficient on these processors, as intel_idle knows
  13. * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
  14. */
  15. /*
  16. * Design Assumptions
  17. *
  18. * All CPUs have same idle states as boot CPU
  19. *
  20. * Chipset BM_STS (bus master status) bit is a NOP
  21. * for preventing entry into deep C-states
  22. *
  23. * CPU will flush caches as needed when entering a C-state via MWAIT
  24. * (in contrast to entering ACPI C3, in which case the WBINVD
  25. * instruction needs to be executed to flush the caches)
  26. */
  27. /*
  28. * Known limitations
  29. *
  30. * ACPI has a .suspend hack to turn off deep c-statees during suspend
  31. * to avoid complications with the lapic timer workaround.
  32. * Have not seen issues with suspend, but may need same workaround here.
  33. *
  34. */
  35. /* un-comment DEBUG to enable pr_debug() statements */
  36. /* #define DEBUG */
  37. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38. #include <linux/acpi.h>
  39. #include <linux/kernel.h>
  40. #include <linux/cpuidle.h>
  41. #include <linux/tick.h>
  42. #include <trace/events/power.h>
  43. #include <linux/sched.h>
  44. #include <linux/sched/smt.h>
  45. #include <linux/notifier.h>
  46. #include <linux/cpu.h>
  47. #include <linux/moduleparam.h>
  48. #include <asm/cpu_device_id.h>
  49. #include <asm/intel-family.h>
  50. #include <asm/nospec-branch.h>
  51. #include <asm/mwait.h>
  52. #include <asm/msr.h>
  53. #include <asm/fpu/api.h>
  54. #define INTEL_IDLE_VERSION "0.5.1"
/* The cpuidle driver object registered with the cpuidle core. */
static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
/* Bitmask of state indices to force-disable (presumably a module parameter
 * registered elsewhere in this file — TODO confirm). */
static unsigned int disabled_states_mask;
/* Bitmask of states to prefer over ACPI-provided ones (same caveat). */
static unsigned int preferred_states_mask;

/* Per-CPU cpuidle device objects, allocated at driver init. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;

/* MSR enable bits to clear when auto-demotion must be turned off. */
static unsigned long auto_demotion_disable_flags;

/*
 * Requested handling of the "C1E promotion" hardware feature:
 * leave it alone, enable it, or disable it during CPU setup.
 */
static enum {
	C1E_PROMOTION_PRESERVE,
	C1E_PROMOTION_ENABLE,
	C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
/*
 * Per-CPU-model match data: which C-state table to use and which
 * model-specific quirks to apply when setting the CPU up for idle.
 */
struct idle_cpu {
	/* Table of idle states for this CPU model (NULL-enter terminated). */
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	/* Baytrail-specific auto-demotion disable quirk. */
	bool byt_auto_demotion_disable_flag;
	/* Clear the "C1E promotion" bit so C1 requests stay C1. */
	bool disable_promotion_to_c1e;
	/* Also consult ACPI _CST when building the state list. */
	bool use_acpi;
};
/*
 * Init-time working state, discarded after driver registration
 * (hence __initdata).
 */
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;
/* MWAIT sub-state availability mask (from CPUID, cached at init). */
static unsigned int mwait_substates __initdata;

/*
 * Enable interrupts before entering the C-state. On some platforms and for
 * some C-states, this may measurably decrease interrupt latency.
 */
#define CPUIDLE_FLAG_IRQ_ENABLE		BIT(14)

/*
 * Enable this state by default even if the ACPI _CST does not list it.
 */
#define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)

/*
 * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
 * above.
 */
#define CPUIDLE_FLAG_IBRS		BIT(16)

/*
 * Initialize large xstate for the C6-state entrance.
 */
#define CPUIDLE_FLAG_INIT_XSTATE	BIT(17)
  102. /*
  103. * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  104. * the C-state (top nibble) and sub-state (bottom nibble)
  105. * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
  106. *
  107. * We store the hint at the top of our "flags" for each state.
  108. */
  109. #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
  110. #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
  111. static __always_inline int __intel_idle(struct cpuidle_device *dev,
  112. struct cpuidle_driver *drv, int index)
  113. {
  114. struct cpuidle_state *state = &drv->states[index];
  115. unsigned long eax = flg2MWAIT(state->flags);
  116. unsigned long ecx = 1; /* break on interrupt flag */
  117. mwait_idle_with_hints(eax, ecx);
  118. return index;
  119. }
/**
 * intel_idle - Ask the processor to enter the given idle state.
 * @dev: cpuidle device of the target CPU.
 * @drv: cpuidle driver (assumed to point to intel_idle_driver).
 * @index: Target idle state index.
 *
 * Use the MWAIT instruction to notify the processor that the CPU represented by
 * @dev is idle and it can try to enter the idle state corresponding to @index.
 *
 * If the local APIC timer is not known to be reliable in the target idle state,
 * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
 *
 * Must be called under local_irq_disable().
 */
static __cpuidle int intel_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	/* Plain variant: enter with interrupts disabled, no extra work. */
	return __intel_idle(dev, drv, index);
}
/*
 * intel_idle_irq - idle entry for CPUIDLE_FLAG_IRQ_ENABLE states:
 * interrupts are re-enabled around MWAIT to reduce IRQ latency.
 */
static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
{
	int ret;

	/* raw_ variant: do not let lockdep see this transient enable. */
	raw_local_irq_enable();
	ret = __intel_idle(dev, drv, index);

	/*
	 * The lockdep hardirqs state may be changed to 'on' with timer
	 * tick interrupt followed by __do_softirq(). Use local_irq_disable()
	 * to keep the hardirqs state correct.
	 */
	local_irq_disable();

	return ret;
}
/*
 * intel_idle_ibrs - idle entry for CPUIDLE_FLAG_IBRS states.
 *
 * When SMT is active, SPEC_CTRL is written to 0 for the duration of the
 * MWAIT (dropping IBRS while idle) and the saved value is restored on
 * wakeup, so the caller-visible SPEC_CTRL state is unchanged.
 */
static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	bool smt_active = sched_smt_active();
	u64 spec_ctrl = spec_ctrl_current();	/* snapshot before clearing */
	int ret;

	if (smt_active)
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);

	ret = __intel_idle(dev, drv, index);

	if (smt_active)
		wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);

	return ret;
}
/*
 * intel_idle_xstate - idle entry for CPUIDLE_FLAG_INIT_XSTATE states:
 * put the FPU/xstate registers into an initialized state before MWAIT
 * (presumably so large xstate does not block deep C-state entry — see
 * fpu_idle_fpregs() for the exact semantics).
 */
static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	fpu_idle_fpregs();
	return __intel_idle(dev, drv, index);
}
/**
 * intel_idle_s2idle - Ask the processor to enter the given idle state.
 * @dev: cpuidle device of the target CPU.
 * @drv: cpuidle driver (assumed to point to intel_idle_driver).
 * @index: Target idle state index.
 *
 * Use the MWAIT instruction to notify the processor that the CPU represented by
 * @dev is idle and it can try to enter the idle state corresponding to @index.
 *
 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
 * scheduler tick and suspended scheduler clock on the target CPU.
 */
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);

	/* Honor the xstate-init quirk here too; there is no separate
	 * s2idle callback per flag variant. */
	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
		fpu_idle_fpregs();

	mwait_idle_with_hints(eax, ecx);

	return 0;
}
/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
/*
 * Nehalem/Westmere idle states. exit_latency and target_residency are
 * in microseconds (cpuidle convention); the MWAIT hint is packed into
 * .flags via MWAIT2flg(). The all-NULL .enter entry terminates the table.
 */
static struct cpuidle_state nehalem_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Sandy Bridge idle states (latencies/residencies in microseconds). */
static struct cpuidle_state snb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/*
 * Bay Trail idle states. Note the module-C-state hints (0x58, 0x52, 0x64)
 * rather than the plain core hints used on big-core parts.
 */
static struct cpuidle_state byt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Cherry Trail idle states — same hints as Bay Trail, faster exits. */
static struct cpuidle_state cht_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Ivy Bridge (client) idle states. */
static struct cpuidle_state ivb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Ivy Town (Ivy Bridge-EP/EX) idle states, 1-2 socket variant. */
static struct cpuidle_state ivt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Ivy Town, 4-socket variant: higher residencies than 1-2 socket. */
static struct cpuidle_state ivt_cstates_4s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 250,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Ivy Town, 8-socket variant: highest residencies of the three tables. */
static struct cpuidle_state ivt_cstates_8s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Haswell idle states, including the deep package states C8-C10. */
static struct cpuidle_state hsw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/* Broadwell idle states — same shape as Haswell, tweaked C3 latency. */
static struct cpuidle_state bdw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/*
 * Skylake (client) idle states. C6 and deeper carry CPUIDLE_FLAG_IBRS:
 * SPEC_CTRL/IBRS is dropped across those states (see intel_idle_ibrs()).
 */
static struct cpuidle_state skl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/*
 * Skylake-X (server) idle states. C1 uses CPUIDLE_FLAG_IRQ_ENABLE
 * (enter with interrupts on — lower IRQ latency); C6 drops IBRS.
 */
static struct cpuidle_state skx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/*
 * Idle states for Ice Lake servers (ICELAKE_X / ICELAKE_D, selected via
 * idle_cpu_icx below).  Latencies/residencies are in microseconds.
 */
static struct cpuidle_state icx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * On AlderLake C1 has to be disabled if C1E is enabled, and vice versa.
 * C1E is enabled only if "C1E promotion" bit is set in MSR_IA32_POWER_CTL.
 * But in this case there is effectively no C1, because C1 requests are
 * promoted to C1E. If the "C1E promotion" bit is cleared, then both C1
 * and C1E requests end up with C1, so there is effectively no C1E.
 *
 * By default we enable C1E and disable C1 by marking it with
 * 'CPUIDLE_FLAG_UNUSABLE'.
 */
static struct cpuidle_state adl_cstates[] __initdata = {
	{
		/* Disabled by default; adl_idle_state_table_update() may
		 * re-enable it and disable C1E instead. */
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 220,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 280,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 680,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for AlderLake-L (mobile) parts; same C1/C1E exclusivity rules
 * as adl_cstates above — C1 starts out CPUIDLE_FLAG_UNUSABLE and may be
 * swapped with C1E by adl_idle_state_table_update().
 */
static struct cpuidle_state adl_l_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 230,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for AlderLake-N parts; same C1/C1E exclusivity rules as
 * adl_cstates above (C1 defaults to CPUIDLE_FLAG_UNUSABLE).
 */
static struct cpuidle_state adl_n_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 195,
		.target_residency = 585,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 260,
		.target_residency = 1040,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 660,
		.target_residency = 1980,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for Sapphire Rapids / Emerald Rapids servers (selected via
 * idle_cpu_spr below).  The C6 numbers assume package C6 and are re-tuned
 * to core-C6 values by spr_idle_state_table_update() when PC6 is disabled.
 */
static struct cpuidle_state spr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* CPUIDLE_FLAG_INIT_XSTATE: extended state is (re)initialized
		 * around this state. */
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE,
		.exit_latency = 290,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for early Atom parts (Bonnell/Saltwell, see idle_cpu_atom and
 * idle_cpu_lincroft below).  Note the first state is C1E, entered via MWAIT
 * hint 0x00.
 */
static struct cpuidle_state atom_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C2",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for Tangier (ATOM_SILVERMONT_MID, see idle_cpu_tangier below).
 */
static struct cpuidle_state tangier_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for ATOM_SILVERMONT_D parts (see idle_cpu_avn below).
 */
static struct cpuidle_state avn_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 15,
		.target_residency = 45,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for Xeon Phi (Knights Landing/Knights Mill, see idle_cpu_knl
 * below).
 */
static struct cpuidle_state knl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.name = "C6",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 120,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for Broxton/Goldmont parts (see idle_cpu_bxt below).  The
 * latency/residency numbers of entries [2]..[6] are placeholders that
 * bxt_idle_state_table_update() overwrites from the IRTL MSRs when those
 * report a non-zero limit.
 */
static struct cpuidle_state bxt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 133,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x31",
		.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 155,
		.target_residency = 155,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1000,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2000,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 10000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Idle states for ATOM_GOLDMONT_D parts (see idle_cpu_dnv below).
 */
static struct cpuidle_state dnv_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 50,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Note, depending on HW and FW revision, SnowRidge SoC may or may not support
 * C6, and this is indicated in the CPUID mwait leaf.
 */
static struct cpuidle_state snr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 15,
		.target_residency = 25,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 130,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Sentinel: .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Per-CPU-model driver configuration: which C-state table to use, whether
 * C1E promotion must be disabled, auto-demotion quirk flags, and whether
 * the ACPI _CST method may be used (use_acpi) to build the states list.
 */
static const struct idle_cpu idle_cpu_nehalem __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
};

/* Nehalem/Westmere server variant: same as above but ACPI _CST allowed. */
static const struct idle_cpu idle_cpu_nhx __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_atom __initconst = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_tangier __initconst = {
	.state_table = tangier_cstates,
};

static const struct idle_cpu idle_cpu_lincroft __initconst = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_snx __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_byt __initconst = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_cht __initconst = {
	.state_table = cht_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_ivb __initconst = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt __initconst = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_hsw __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_hsx __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bdw __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_bdx __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_skl __initconst = {
	.state_table = skl_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skx __initconst = {
	.state_table = skx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_icx __initconst = {
	.state_table = icx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/* AlderLake variants manage C1E promotion themselves, see
 * adl_idle_state_table_update(). */
static const struct idle_cpu idle_cpu_adl __initconst = {
	.state_table = adl_cstates,
};

static const struct idle_cpu idle_cpu_adl_l __initconst = {
	.state_table = adl_l_cstates,
};

static const struct idle_cpu idle_cpu_adl_n __initconst = {
	.state_table = adl_n_cstates,
};

static const struct idle_cpu idle_cpu_spr __initconst = {
	.state_table = spr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_avn __initconst = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_knl __initconst = {
	.state_table = knl_cstates,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bxt __initconst = {
	.state_table = bxt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_dnv __initconst = {
	.state_table = dnv_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_snr __initconst = {
	.state_table = snr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};
/*
 * Map family-6 CPU models to the idle_cpu configurations above.
 * Terminated by the empty entry.
 */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL,	&idle_cpu_atom),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID,	&idle_cpu_lincroft),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&idle_cpu_snb),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&idle_cpu_snx),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL,	&idle_cpu_atom),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&idle_cpu_byt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID,	&idle_cpu_tangier),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&idle_cpu_cht),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&idle_cpu_ivb),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&idle_cpu_ivt),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&idle_cpu_hsx),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&idle_cpu_avn),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&idle_cpu_bdw),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&idle_cpu_bdw),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&idle_cpu_bdx),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&idle_cpu_bdx),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&idle_cpu_skx),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&idle_cpu_icx),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&idle_cpu_icx),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&idle_cpu_adl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&idle_cpu_adl_l),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		&idle_cpu_adl_n),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&idle_cpu_bxt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&idle_cpu_dnv),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&idle_cpu_snr),
	{}
};
/* Fallback match: any family-6 Intel CPU advertising the MWAIT feature. */
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
	{}
};
  1360. static bool __init intel_idle_max_cstate_reached(int cstate)
  1361. {
  1362. if (cstate + 1 > max_cstate) {
  1363. pr_info("max_cstate %d reached\n", max_cstate);
  1364. return true;
  1365. }
  1366. return false;
  1367. }
  1368. static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
  1369. {
  1370. unsigned long eax = flg2MWAIT(state->flags);
  1371. if (boot_cpu_has(X86_FEATURE_ARAT))
  1372. return false;
  1373. /*
  1374. * Switch over to one-shot tick broadcast if the target C-state
  1375. * is deeper than C1.
  1376. */
  1377. return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
  1378. }
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>

/* Module parameter: forbid using ACPI _CST to build the idle states list. */
static bool no_acpi __read_mostly;
module_param(no_acpi, bool, 0444);
MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");

/* Module parameter: prefer _CST over the static tables. */
static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

/* _CST data extracted by intel_idle_acpi_cst_extract(). */
static struct acpi_processor_power acpi_state_table __initdata;
  1388. /**
  1389. * intel_idle_cst_usable - Check if the _CST information can be used.
  1390. *
  1391. * Check if all of the C-states listed by _CST in the max_cstate range are
  1392. * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
  1393. */
  1394. static bool __init intel_idle_cst_usable(void)
  1395. {
  1396. int cstate, limit;
  1397. limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
  1398. acpi_state_table.count);
  1399. for (cstate = 1; cstate < limit; cstate++) {
  1400. struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
  1401. if (cx->entry_method != ACPI_CSTATE_FFH)
  1402. return false;
  1403. }
  1404. return true;
  1405. }
/*
 * Find a CPU whose _CST data is usable, cache it in acpi_state_table and
 * claim _CST control from the platform.  Returns true on success; on any
 * failure the cached table is invalidated (count = 0) and false is returned.
 */
static bool __init intel_idle_acpi_cst_extract(void)
{
	unsigned int cpu;

	if (no_acpi) {
		pr_debug("Not allowed to use ACPI _CST\n");
		return false;
	}

	for_each_possible_cpu(cpu) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (!pr)
			continue;

		/* Non-zero return: this CPU's _CST could not be evaluated. */
		if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
			continue;

		/*
		 * NOTE(review): count is bumped by one here — presumably so
		 * the 1-based states[] loops below cover the last entry;
		 * confirm against acpi_processor_evaluate_cst().
		 */
		acpi_state_table.count++;

		if (!intel_idle_cst_usable())
			continue;

		/* Cannot claim _CST control: give up entirely, not just
		 * for this CPU. */
		if (!acpi_processor_claim_cst_control())
			break;

		return true;
	}

	acpi_state_table.count = 0;
	pr_debug("ACPI _CST not found or not usable\n");
	return false;
}
/*
 * Populate @drv->states[] (appending at drv->state_count) from the cached
 * ACPI _CST data in acpi_state_table.
 */
static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);

	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH.
	 */
	for (cstate = 1; cstate < limit; cstate++) {
		struct acpi_processor_cx *cx;
		struct cpuidle_state *state;

		/* Honor the intel_idle.max_cstate= limit. */
		if (intel_idle_max_cstate_reached(cstate - 1))
			break;

		cx = &acpi_state_table.states[cstate];

		state = &drv->states[drv->state_count++];

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		/*
		 * For C1-type C-states use the same number for both the exit
		 * latency and target residency, because that is the case for
		 * C1 in the majority of the static C-states tables above.
		 * For the other types of C-states, however, set the target
		 * residency to 3 times the exit latency which should lead to
		 * a reasonable balance between energy-efficiency and
		 * performance in the majority of interesting cases.
		 */
		state->target_residency = cx->latency;
		if (cx->type > ACPI_STATE_C1)
			state->target_residency *= 3;

		/* For FFH states cx->address carries the MWAIT hint. */
		state->flags = MWAIT2flg(cx->address);
		if (cx->type > ACPI_STATE_C2)
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;

		/* Disable states the user masked off via disabled_states_mask. */
		if (disabled_states_mask & BIT(cstate))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		state->enter = intel_idle;
		state->enter_s2idle = intel_idle_s2idle;
	}
}
  1470. static bool __init intel_idle_off_by_default(u32 mwait_hint)
  1471. {
  1472. int cstate, limit;
  1473. /*
  1474. * If there are no _CST C-states, do not disable any C-states by
  1475. * default.
  1476. */
  1477. if (!acpi_state_table.count)
  1478. return false;
  1479. limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
  1480. /*
  1481. * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
  1482. * the interesting states are ACPI_CSTATE_FFH.
  1483. */
  1484. for (cstate = 1; cstate < limit; cstate++) {
  1485. if (acpi_state_table.states[cstate].address == mwait_hint)
  1486. return false;
  1487. }
  1488. return true;
  1489. }
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */

/* Without ACPI processor C-state support, _CST is never consulted. */
#define force_use_acpi	(false)

static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
 *
 * Tune IVT multi-socket targets.
 * Assumption: num_sockets == (max_package_num + 1).
 */
static void __init ivt_idle_state_table_update(void)
{
	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
	int cpu, package_num, num_sockets = 1;

	for_each_online_cpu(cpu) {
		package_num = topology_physical_package_id(cpu);
		if (package_num + 1 > num_sockets) {
			num_sockets = package_num + 1;

			/* More than 4 sockets: stop scanning right away. */
			if (num_sockets > 4) {
				cpuidle_state_table = ivt_cstates_8s;
				return;
			}
		}
	}

	if (num_sockets > 2)
		cpuidle_state_table = ivt_cstates_4s;

	/* else, 1 and 2 socket systems use default ivt_cstates */
}
  1520. /**
  1521. * irtl_2_usec - IRTL to microseconds conversion.
  1522. * @irtl: IRTL MSR value.
  1523. *
  1524. * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
  1525. */
  1526. static unsigned long long __init irtl_2_usec(unsigned long long irtl)
  1527. {
  1528. static const unsigned int irtl_ns_units[] __initconst = {
  1529. 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
  1530. };
  1531. unsigned long long ns;
  1532. if (!irtl)
  1533. return 0;
  1534. ns = irtl_ns_units[(irtl >> 10) & 0x7];
  1535. return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
  1536. }
  1537. /**
  1538. * bxt_idle_state_table_update - Fix up the Broxton idle states table.
  1539. *
  1540. * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
  1541. * definitive maximum latency and use the same value for target_residency.
  1542. */
  1543. static void __init bxt_idle_state_table_update(void)
  1544. {
  1545. unsigned long long msr;
  1546. unsigned int usec;
  1547. rdmsrl(MSR_PKGC6_IRTL, msr);
  1548. usec = irtl_2_usec(msr);
  1549. if (usec) {
  1550. bxt_cstates[2].exit_latency = usec;
  1551. bxt_cstates[2].target_residency = usec;
  1552. }
  1553. rdmsrl(MSR_PKGC7_IRTL, msr);
  1554. usec = irtl_2_usec(msr);
  1555. if (usec) {
  1556. bxt_cstates[3].exit_latency = usec;
  1557. bxt_cstates[3].target_residency = usec;
  1558. }
  1559. rdmsrl(MSR_PKGC8_IRTL, msr);
  1560. usec = irtl_2_usec(msr);
  1561. if (usec) {
  1562. bxt_cstates[4].exit_latency = usec;
  1563. bxt_cstates[4].target_residency = usec;
  1564. }
  1565. rdmsrl(MSR_PKGC9_IRTL, msr);
  1566. usec = irtl_2_usec(msr);
  1567. if (usec) {
  1568. bxt_cstates[5].exit_latency = usec;
  1569. bxt_cstates[5].target_residency = usec;
  1570. }
  1571. rdmsrl(MSR_PKGC10_IRTL, msr);
  1572. usec = irtl_2_usec(msr);
  1573. if (usec) {
  1574. bxt_cstates[6].exit_latency = usec;
  1575. bxt_cstates[6].target_residency = usec;
  1576. }
  1577. }
/**
 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
 *
 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
 */
static void __init sklh_idle_state_table_update(void)
{
	unsigned long long msr;
	unsigned int eax, ebx, ecx, edx;

	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
	if (max_cstate <= 7)
		return;

	/* if PC10 not present in CPUID.MWAIT.EDX (substate count in bits 31:28) */
	if ((mwait_substates & (0xF << 28)) == 0)
		return;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* PC10 is not enabled in PKG C-state limit (bits 3:0 must be 8) */
	if ((msr & 0xF) != 8)
		return;

	ecx = 0;
	cpuid(7, &eax, &ebx, &ecx, &edx);

	/* if SGX is present (CPUID.7.EBX bit 2) */
	if (ebx & (1 << 2)) {

		rdmsrl(MSR_IA32_FEAT_CTL, msr);

		/* if SGX is enabled (MSR_IA32_FEAT_CTL bit 18) */
		if (msr & (1 << 18))
			return;
	}

	skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C8-SKL */
	skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C9-SKL */
}
/**
 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
 * idle states table.
 */
static void __init skx_idle_state_table_update(void)
{
	unsigned long long msr;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/*
	 * Package C-state limit encoding in bits 2:0:
	 * 000b: C0/C1 (no package C-state support)
	 * 001b: C2
	 * 010b: C6 (non-retention)
	 * 011b: C6 (retention)
	 * 111b: No Package C state limits.
	 */
	if ((msr & 0x7) < 2) {
		/*
		 * Uses the CC6 + PC0 latency and 3 times of
		 * latency for target_residency if the PC6
		 * is disabled in BIOS. This is consistent
		 * with how intel_idle driver uses _CST
		 * to set the target_residency.
		 */
		skx_cstates[2].exit_latency = 92;
		skx_cstates[2].target_residency = 276;
	}
}
  1636. /**
  1637. * adl_idle_state_table_update - Adjust AlderLake idle states table.
  1638. */
  1639. static void __init adl_idle_state_table_update(void)
  1640. {
  1641. /* Check if user prefers C1 over C1E. */
  1642. if (preferred_states_mask & BIT(1) && !(preferred_states_mask & BIT(2))) {
  1643. cpuidle_state_table[0].flags &= ~CPUIDLE_FLAG_UNUSABLE;
  1644. cpuidle_state_table[1].flags |= CPUIDLE_FLAG_UNUSABLE;
  1645. /* Disable C1E by clearing the "C1E promotion" bit. */
  1646. c1e_promotion = C1E_PROMOTION_DISABLE;
  1647. return;
  1648. }
  1649. /* Make sure C1E is enabled by default */
  1650. c1e_promotion = C1E_PROMOTION_ENABLE;
  1651. }
  1652. /**
  1653. * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
  1654. */
  1655. static void __init spr_idle_state_table_update(void)
  1656. {
  1657. unsigned long long msr;
  1658. /*
  1659. * By default, the C6 state assumes the worst-case scenario of package
  1660. * C6. However, if PC6 is disabled, we update the numbers to match
  1661. * core C6.
  1662. */
  1663. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
  1664. /* Limit value 2 and above allow for PC6. */
  1665. if ((msr & 0x7) < 2) {
  1666. spr_cstates[2].exit_latency = 190;
  1667. spr_cstates[2].target_residency = 600;
  1668. }
  1669. }
  1670. static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
  1671. {
  1672. unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
  1673. unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
  1674. MWAIT_SUBSTATE_MASK;
  1675. /* Ignore the C-state if there are NO sub-states in CPUID for it. */
  1676. if (num_substates == 0)
  1677. return false;
  1678. if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
  1679. mark_tsc_unstable("TSC halts in idle states deeper than C2");
  1680. return true;
  1681. }
/*
 * intel_idle_init_cstates_icpu - Populate @drv from the built-in state table.
 * @drv: cpuidle driver structure to fill in.
 *
 * Applies model-specific fixups to the selected state table, then copies
 * every usable entry into @drv->states, choosing an alternative ->enter
 * callback where the state flags require one, and marking states disabled
 * by default where requested.  Finally disables BYT auto-demotion when the
 * idle_cpu data asks for it.
 */
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
	int cstate;

	/* Model-specific adjustments of the selected state table. */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_IVYBRIDGE_X:
		ivt_idle_state_table_update();
		break;
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		bxt_idle_state_table_update();
		break;
	case INTEL_FAM6_SKYLAKE:
		sklh_idle_state_table_update();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		skx_idle_state_table_update();
		break;
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_EMERALDRAPIDS_X:
		spr_idle_state_table_update();
		break;
	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_ALDERLAKE_N:
		adl_idle_state_table_update();
		break;
	}

	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
		unsigned int mwait_hint;

		/* Stop at the user-imposed C-state limit. */
		if (intel_idle_max_cstate_reached(cstate))
			break;

		/* A table entry with neither callback terminates the table. */
		if (!cpuidle_state_table[cstate].enter &&
		    !cpuidle_state_table[cstate].enter_s2idle)
			break;

		/* If marked as unusable, skip this state. */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
			pr_debug("state %s is disabled\n",
				 cpuidle_state_table[cstate].name);
			continue;
		}

		/* Skip states CPUID reports no MWAIT sub-states for. */
		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
		if (!intel_idle_verify_cstate(mwait_hint))
			continue;

		/* Structure copy. */
		drv->states[drv->state_count] = cpuidle_state_table[cstate];

		/*
		 * Select an alternative ->enter callback as dictated by the
		 * state flags.  IRQ_ENABLE and IBRS are mutually exclusive;
		 * note that INIT_XSTATE, checked last, takes precedence over
		 * both if set together with either of them.
		 */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
			drv->states[drv->state_count].enter = intel_idle_irq;

		if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
		    cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
			WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE);
			drv->states[drv->state_count].enter = intel_idle_ibrs;
		}

		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_INIT_XSTATE)
			drv->states[drv->state_count].enter = intel_idle_xstate;

		/*
		 * Disable the state by default if the user asked for it via
		 * intel_idle.states_off, or if ACPI _CST does not list it
		 * (unless the state is flagged ALWAYS_ENABLE).
		 */
		if ((disabled_states_mask & BIT(drv->state_count)) ||
		    ((icpu->use_acpi || force_use_acpi) &&
		     intel_idle_off_by_default(mwait_hint) &&
		     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;

		drv->state_count++;
	}

	/* Disable C6/MC6 demotion on Baytrail-class parts if requested. */
	if (icpu->byt_auto_demotion_disable_flag) {
		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
	}
}
  1750. /**
  1751. * intel_idle_cpuidle_driver_init - Create the list of available idle states.
  1752. * @drv: cpuidle driver structure to initialize.
  1753. */
  1754. static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
  1755. {
  1756. cpuidle_poll_state_init(drv);
  1757. if (disabled_states_mask & BIT(0))
  1758. drv->states[0].flags |= CPUIDLE_FLAG_OFF;
  1759. drv->state_count = 1;
  1760. if (icpu)
  1761. intel_idle_init_cstates_icpu(drv);
  1762. else
  1763. intel_idle_init_cstates_acpi(drv);
  1764. }
  1765. static void auto_demotion_disable(void)
  1766. {
  1767. unsigned long long msr_bits;
  1768. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
  1769. msr_bits &= ~auto_demotion_disable_flags;
  1770. wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
  1771. }
  1772. static void c1e_promotion_enable(void)
  1773. {
  1774. unsigned long long msr_bits;
  1775. rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1776. msr_bits |= 0x2;
  1777. wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1778. }
  1779. static void c1e_promotion_disable(void)
  1780. {
  1781. unsigned long long msr_bits;
  1782. rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1783. msr_bits &= ~0x2;
  1784. wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1785. }
  1786. /**
  1787. * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
  1788. * @cpu: CPU to initialize.
  1789. *
  1790. * Register a cpuidle device object for @cpu and update its MSRs in accordance
  1791. * with the processor model flags.
  1792. */
  1793. static int intel_idle_cpu_init(unsigned int cpu)
  1794. {
  1795. struct cpuidle_device *dev;
  1796. dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
  1797. dev->cpu = cpu;
  1798. if (cpuidle_register_device(dev)) {
  1799. pr_debug("cpuidle_register_device %d failed!\n", cpu);
  1800. return -EIO;
  1801. }
  1802. if (auto_demotion_disable_flags)
  1803. auto_demotion_disable();
  1804. if (c1e_promotion == C1E_PROMOTION_ENABLE)
  1805. c1e_promotion_enable();
  1806. else if (c1e_promotion == C1E_PROMOTION_DISABLE)
  1807. c1e_promotion_disable();
  1808. return 0;
  1809. }
  1810. static int intel_idle_cpu_online(unsigned int cpu)
  1811. {
  1812. struct cpuidle_device *dev;
  1813. if (!boot_cpu_has(X86_FEATURE_ARAT))
  1814. tick_broadcast_enable();
  1815. /*
  1816. * Some systems can hotplug a cpu at runtime after
  1817. * the kernel has booted, we have to initialize the
  1818. * driver in this case
  1819. */
  1820. dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
  1821. if (!dev->registered)
  1822. return intel_idle_cpu_init(cpu);
  1823. return 0;
  1824. }
  1825. /**
  1826. * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
  1827. */
  1828. static void __init intel_idle_cpuidle_devices_uninit(void)
  1829. {
  1830. int i;
  1831. for_each_online_cpu(i)
  1832. cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
  1833. }
/*
 * intel_idle_init - Driver entry point.
 *
 * Checks command-line overrides and CPU capabilities, extracts ACPI _CST
 * data where required, builds the driver's idle state list, registers the
 * cpuidle driver, and installs the CPU hotplug "online" callback.  Returns
 * 0 on success or a negative error code.
 */
static int __init intel_idle_init(void)
{
	const struct x86_cpu_id *id;
	unsigned int eax, ebx, ecx;
	int retval;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	/* intel_idle.max_cstate=0 disables the driver entirely. */
	if (max_cstate == 0) {
		pr_debug("disabled\n");
		return -EPERM;
	}

	/* Match a known CPU; fall back to the generic MWAIT-capable list. */
	id = x86_match_cpu(intel_idle_ids);
	if (id) {
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
	} else {
		id = x86_match_cpu(intel_mwait_ids);
		if (!id)
			return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	/* EDX of the MWAIT leaf holds the per-C-state sub-state counts. */
	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	/* MWAIT extensions with interrupt break-events are required. */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu) {
		cpuidle_state_table = icpu->state_table;
		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
		if (icpu->disable_promotion_to_c1e)
			c1e_promotion = C1E_PROMOTION_DISABLE;
		if (icpu->use_acpi || force_use_acpi)
			intel_idle_acpi_cst_extract();
	} else if (!intel_idle_acpi_cst_extract()) {
		/* No built-in table and no usable ACPI _CST data. */
		return -ENODEV;
	}

	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
		 boot_cpu_data.x86_model);

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (!intel_idle_cpuidle_devices)
		return -ENOMEM;

	intel_idle_cpuidle_driver_init(&intel_idle_driver);

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		/* Another cpuidle driver got registered first. */
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
		       drv ? drv->name : "none");
		goto init_driver_fail;
	}

	/* Register per-CPU devices as CPUs come (or already are) online. */
	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
				   intel_idle_cpu_online, NULL);
	if (retval < 0)
		goto hp_setup_fail;

	pr_debug("Local APIC timer is reliable in %s\n",
		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

	return 0;

hp_setup_fail:
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
	free_percpu(intel_idle_cpuidle_devices);
	return retval;
}
device_initcall(intel_idle_init);

/*
 * We are not really modular, but we used to support that.  Meaning we also
 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
 * is the easiest way (currently) to continue doing that.
 */
module_param(max_cstate, int, 0444);
/*
 * The positions of the bits that are set in this number are the indices of the
 * idle states to be disabled by default (as reflected by the names of the
 * corresponding idle state directories in sysfs, "state0", "state1" ...
 * "state<i>" ..., where <i> is the index of the given state).
 */
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
/*
 * Some platforms come with mutually exclusive C-states, so that if one is
 * enabled, the other C-states must not be used.  Example: C1 and C1E on
 * the Sapphire Rapids platform.  This parameter allows for selecting the
 * preferred C-states among the groups of mutually exclusive C-states - the
 * selected C-states will be registered, the other C-states from the mutually
 * exclusive group won't be registered.  If the platform has no mutually
 * exclusive C-states, this parameter has no effect.
 */
module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");