cpuidle-riscv-sbi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
        u32 *states;
        struct device *dev;
};

struct sbi_domain_state {
        bool available;
        u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;
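
/*
 * Per-CPU accessors for the domain idle state. The genpd ->power_off()
 * callback records the state selected for the CPU's PM domain here and the
 * domain-aware idle-entry path consumes it, so an "available" flag is
 * tracked alongside the value.
 */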
static inline void sbi_set_domain_state(u32 state)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        data->available = true;
        data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        return data->state;
}

static inline void sbi_clear_domain_state(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
        struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

        return data->available;
}
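
/*
 * Issue the SBI HSM HART_SUSPEND call. For non-retentive suspend types this
 * is invoked as the cpu_suspend() finisher, which supplies the resume
 * address and an opaque context pointer; for retentive types it is called
 * directly with both set to zero.
 */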
static int sbi_suspend_finisher(unsigned long suspend_type,
                                unsigned long resume_addr,
                                unsigned long opaque)
{
        struct sbiret ret;

        ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
                        suspend_type, resume_addr, opaque, 0, 0, 0);

        return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

static int sbi_suspend(u32 state)
{
        if (state & SBI_HSM_SUSP_NON_RET_BIT)
                return cpu_suspend(state, sbi_suspend_finisher);
        else
                return sbi_suspend_finisher(state, 0, 0);
}
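
/*
 * Default idle-state entry path, used when the CPU is not part of a
 * hierarchical PM domain topology. Non-retentive states go through the CPU
 * PM notifiers via CPU_PM_CPU_IDLE_ENTER_PARAM(); retentive states use the
 * lighter RETENTION variant since the hart context is preserved.
 */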
static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int idx)
{
        u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
        u32 state = states[idx];

        if (state & SBI_HSM_SUSP_NON_RET_BIT)
                return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
        else
                return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
                                                             idx, state);
}
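
/*
 * Idle-state entry for CPUs attached to a PM domain (hierarchical topology,
 * OSI mode). Runtime PM, or the genpd suspend/resume helpers for s2idle, is
 * used to notify the domain, which may select a deeper shared domain state
 * and record it via sbi_set_domain_state().
 */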
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
                                         struct cpuidle_driver *drv, int idx,
                                         bool s2idle)
{
        struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
        u32 *states = data->states;
        struct device *pd_dev = data->dev;
        u32 state;
        int ret;

        ret = cpu_pm_enter();
        if (ret)
                return -1;

        /* Do runtime PM to manage a hierarchical CPU topology. */
        ct_irq_enter_irqson();
        if (s2idle)
                dev_pm_genpd_suspend(pd_dev);
        else
                pm_runtime_put_sync_suspend(pd_dev);
        ct_irq_exit_irqson();

        if (sbi_is_domain_state_available())
                state = sbi_get_domain_state();
        else
                state = states[idx];

        ret = sbi_suspend(state) ? -1 : idx;

        ct_irq_enter_irqson();
        if (s2idle)
                dev_pm_genpd_resume(pd_dev);
        else
                pm_runtime_get_sync(pd_dev);
        ct_irq_exit_irqson();

        cpu_pm_exit();

        /* Clear the domain state to start fresh when back from idle. */
        sbi_clear_domain_state();

        return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
                                       struct cpuidle_driver *drv, int idx)
{
        return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
                                              struct cpuidle_driver *drv,
                                              int idx)
{
        return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}
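
/*
 * CPU hotplug callbacks: hold a runtime PM reference on the CPU's PM domain
 * device while the CPU is online and drop it when the CPU goes down,
 * clearing any stale domain state on the way out.
 */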
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
        struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

        if (pd_dev)
                pm_runtime_get_sync(pd_dev);

        return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
        struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

        if (pd_dev) {
                pm_runtime_put_sync(pd_dev);
                /* Clear domain state to start fresh at next online. */
                sbi_clear_domain_state();
        }

        return 0;
}

static void sbi_idle_init_cpuhp(void)
{
        int err;

        if (!sbi_cpuidle_use_cpuhp)
                return;

        err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
                                        "cpuidle/sbi:online",
                                        sbi_cpuidle_cpuhp_up,
                                        sbi_cpuidle_cpuhp_down);
        if (err)
                pr_warn("Failed %d while setup cpuhp state\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
        { .compatible = "riscv,idle-state",
          .data = sbi_cpuidle_enter_state },
        { },
};
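
/*
 * Reject suspend parameter values that fall between the default and the
 * first platform-specific suspend type, for both the retentive and
 * non-retentive ranges; these values are reserved by the SBI HSM extension.
 */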
static bool sbi_suspend_state_is_valid(u32 state)
{
        if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
            state < SBI_HSM_SUSPEND_RET_PLATFORM)
                return false;

        if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
            state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
                return false;

        return true;
}

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
        int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

        if (err) {
                pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
                return err;
        }

        if (!sbi_suspend_state_is_valid(*state)) {
                pr_warn("Invalid SBI suspend state %#x\n", *state);
                return -EINVAL;
        }

        return 0;
}
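
/*
 * Attach the CPU to its PM domain when the hierarchical topology is used
 * (OSI mode only). The deepest state's enter callbacks are redirected to
 * the domain-aware entry points and CPU hotplug handling is enabled so the
 * runtime PM reference follows the CPU's online state.
 */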
static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
                                    struct sbi_cpuidle_data *data,
                                    unsigned int state_count, int cpu)
{
        /* Currently limit the hierarchical topology to be used in OSI mode. */
        if (!sbi_cpuidle_use_osi)
                return 0;

        data->dev = dt_idle_attach_cpu(cpu, "sbi");
        if (IS_ERR_OR_NULL(data->dev))
                return PTR_ERR_OR_ZERO(data->dev);

        /*
         * Using the deepest state for the CPU to trigger a potential selection
         * of a shared state for the domain, assumes the domain states are all
         * deeper states.
         */
        drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
        drv->states[state_count - 1].enter_s2idle =
                sbi_enter_s2idle_domain_idle_state;
        sbi_cpuidle_use_cpuhp = true;

        return 0;
}
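
/*
 * Parse the SBI suspend parameter for each DT idle state of this CPU
 * (index 0 is the architectural WFI and has no DT node) and store the
 * resulting array in the per-CPU data.
 */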
static int sbi_cpuidle_dt_init_states(struct device *dev,
                                      struct cpuidle_driver *drv,
                                      unsigned int cpu,
                                      unsigned int state_count)
{
        struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
        struct device_node *state_node;
        struct device_node *cpu_node;
        u32 *states;
        int i, ret;

        cpu_node = of_cpu_device_node_get(cpu);
        if (!cpu_node)
                return -ENODEV;

        states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
        if (!states) {
                ret = -ENOMEM;
                goto fail;
        }

        /* Parse SBI specific details from state DT nodes */
        for (i = 1; i < state_count; i++) {
                state_node = of_get_cpu_state_node(cpu_node, i - 1);
                if (!state_node)
                        break;

                ret = sbi_dt_parse_state_node(state_node, &states[i]);
                of_node_put(state_node);

                if (ret)
                        goto fail;

                pr_debug("sbi-state %#x index %d\n", states[i], i);
        }
        if (i != state_count) {
                ret = -ENODEV;
                goto fail;
        }

        /* Initialize optional data, used for the hierarchical topology. */
        ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
        if (ret < 0)
                goto fail;

        /* Store states in the per-cpu struct. */
        data->states = states;

fail:
        /* Drop the reference taken by of_cpu_device_node_get() on all paths. */
        of_node_put(cpu_node);

        return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
        struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

        dt_idle_detach_cpu(data->dev);
        sbi_cpuidle_use_cpuhp = false;
}
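
/*
 * Register a cpuidle driver for one CPU: state index 0 is the architectural
 * WFI, the remaining states come from the DT. A cooling device is registered
 * on top of the driver once registration succeeds.
 */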
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
        struct cpuidle_driver *drv;
        unsigned int state_count = 0;
        int ret = 0;

        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        drv->name = "sbi_cpuidle";
        drv->owner = THIS_MODULE;
        drv->cpumask = (struct cpumask *)cpumask_of(cpu);

        /* RISC-V architectural WFI to be represented as state index 0. */
        drv->states[0].enter = sbi_cpuidle_enter_state;
        drv->states[0].exit_latency = 1;
        drv->states[0].target_residency = 1;
        drv->states[0].power_usage = UINT_MAX;
        strcpy(drv->states[0].name, "WFI");
        strcpy(drv->states[0].desc, "RISC-V WFI");

        /*
         * If no DT idle states are detected (ret == 0), let the driver
         * initialization fail accordingly, since there is no reason to
         * initialize the idle driver if only WFI is supported: the default
         * architectural back-end already executes WFI on idle entry.
         */
        ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
        if (ret <= 0) {
                pr_debug("HART%ld: failed to parse DT idle states\n",
                         cpuid_to_hartid_map(cpu));
                return ret ? : -ENODEV;
        }
        state_count = ret + 1; /* Include WFI state as well */

        /* Initialize idle states from DT. */
        ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
        if (ret) {
                pr_err("HART%ld: failed to init idle states\n",
                       cpuid_to_hartid_map(cpu));
                return ret;
        }

        ret = cpuidle_register(drv, NULL);
        if (ret)
                goto deinit;

        cpuidle_cooling_register(drv);

        return 0;
deinit:
        sbi_cpuidle_deinit_cpu(cpu);
        return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
        /*
         * All devices have now been attached/probed to the PM domain
         * topology, hence it's fine to allow domain states to be picked.
         */
        sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
        struct genpd_power_state *state = &pd->states[pd->state_idx];
        u32 *pd_state;

        if (!state->data)
                return 0;

        if (!sbi_cpuidle_pd_allow_domain_state)
                return -EBUSY;

        /* OSI mode is enabled, set the corresponding domain state. */
        pd_state = state->data;
        sbi_set_domain_state(*pd_state);

        return 0;
}

struct sbi_pd_provider {
        struct list_head link;
        struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);
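
/*
 * Create a genpd for one DT power-domain node. The domain is IRQ safe and
 * marked as a CPU domain; its ->power_off() callback is only wired up when
 * OSI mode is in use, otherwise the domain is kept always on.
 */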
static int sbi_pd_init(struct device_node *np)
{
        struct generic_pm_domain *pd;
        struct sbi_pd_provider *pd_provider;
        struct dev_power_governor *pd_gov;
        int ret = -ENOMEM;

        pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
        if (!pd)
                goto out;

        pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
        if (!pd_provider)
                goto free_pd;

        pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

        /* Allow power off when OSI is available. */
        if (sbi_cpuidle_use_osi)
                pd->power_off = sbi_cpuidle_pd_power_off;
        else
                pd->flags |= GENPD_FLAG_ALWAYS_ON;

        /* Use governor for CPU PM domains if it has some states to manage. */
        pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

        ret = pm_genpd_init(pd, pd_gov, false);
        if (ret)
                goto free_pd_prov;

        ret = of_genpd_add_provider_simple(np, pd);
        if (ret)
                goto remove_pd;

        pd_provider->node = of_node_get(np);
        list_add(&pd_provider->link, &sbi_pd_providers);

        pr_debug("init PM domain %s\n", pd->name);
        return 0;

remove_pd:
        pm_genpd_remove(pd);
free_pd_prov:
        kfree(pd_provider);
free_pd:
        dt_idle_pd_free(pd);
out:
        pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);

        return ret;
}

static void sbi_pd_remove(void)
{
        struct sbi_pd_provider *pd_provider, *it;
        struct generic_pm_domain *genpd;

        list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
                of_genpd_del_provider(pd_provider->node);

                genpd = of_genpd_remove_last(pd_provider->node);
                if (!IS_ERR(genpd))
                        kfree(genpd);

                of_node_put(pd_provider->node);
                list_del(&pd_provider->link);
                kfree(pd_provider);
        }
}
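
/*
 * Walk the child nodes of /cpus/power-domains, create a genpd provider for
 * each node carrying "#power-domain-cells", then link the domains together
 * to model the CPU topology.
 */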
static int sbi_genpd_probe(struct device_node *np)
{
        struct device_node *node;
        int ret = 0, pd_count = 0;

        if (!np)
                return -ENODEV;

        /*
         * Parse child nodes for the "#power-domain-cells" property and
         * initialize a genpd/genpd-of-provider pair when it's found.
         */
        for_each_child_of_node(np, node) {
                if (!of_find_property(node, "#power-domain-cells", NULL))
                        continue;

                ret = sbi_pd_init(node);
                if (ret)
                        goto put_node;

                pd_count++;
        }

        /* Bail out if not using the hierarchical CPU topology. */
        if (!pd_count)
                goto no_pd;

        /* Link genpd masters/subdomains to model the CPU topology. */
        ret = dt_idle_pd_init_topology(np);
        if (ret)
                goto remove_pd;

        return 0;

put_node:
        of_node_put(node);
remove_pd:
        sbi_pd_remove();
        pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
        return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
        return 0;
}

#endif
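
/*
 * Platform driver probe: OSI mode is assumed only if every possible CPU has
 * both "power-domains" and "power-domain-names" in its DT node. The PM
 * domains under /cpus/power-domains are then populated, a cpuidle driver is
 * registered per CPU and the CPU hotplug notifiers are set up.
 */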
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
        int cpu, ret;
        struct cpuidle_driver *drv;
        struct cpuidle_device *dev;
        struct device_node *np, *pds_node;

        /* Detect OSI support based on CPU DT nodes */
        sbi_cpuidle_use_osi = true;
        for_each_possible_cpu(cpu) {
                np = of_cpu_device_node_get(cpu);
                if (np &&
                    of_find_property(np, "power-domains", NULL) &&
                    of_find_property(np, "power-domain-names", NULL)) {
                        of_node_put(np);
                        continue;
                }

                /* of_node_put() tolerates a NULL node. */
                of_node_put(np);
                sbi_cpuidle_use_osi = false;
                break;
        }

        /* Populate generic power domains from DT nodes */
        pds_node = of_find_node_by_path("/cpus/power-domains");
        if (pds_node) {
                ret = sbi_genpd_probe(pds_node);
                of_node_put(pds_node);
                if (ret)
                        return ret;
        }

        /* Initialize CPU idle driver for each CPU */
        for_each_possible_cpu(cpu) {
                ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
                if (ret) {
                        pr_debug("HART%ld: idle driver init failed\n",
                                 cpuid_to_hartid_map(cpu));
                        goto out_fail;
                }
        }

        /* Setup CPU hotplug notifiers */
        sbi_idle_init_cpuhp();

        pr_info("idle driver registered for all CPUs\n");

        return 0;

out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
                cpuidle_unregister(drv);
                sbi_cpuidle_deinit_cpu(cpu);
        }

        return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
        .probe = sbi_cpuidle_probe,
        .driver = {
                .name = "sbi-cpuidle",
                .sync_state = sbi_cpuidle_domain_sync_state,
        },
};
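
/*
 * Register the platform driver and a matching platform device at init time.
 * The driver is skipped entirely when the firmware does not implement SBI
 * v0.3 or later with the HSM extension, since HSM hart suspend is then not
 * available.
 */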
static int __init sbi_cpuidle_init(void)
{
        int ret;
        struct platform_device *pdev;

        /*
         * The SBI HSM suspend function is only available when:
         * 1) SBI version is 0.3 or higher
         * 2) SBI HSM extension is available
         */
        if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
            !sbi_probe_extension(SBI_EXT_HSM)) {
                pr_info("HSM suspend not available\n");
                return 0;
        }

        ret = platform_driver_register(&sbi_cpuidle_driver);
        if (ret)
                return ret;

        pdev = platform_device_register_simple("sbi-cpuidle",
                                               -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&sbi_cpuidle_driver);
                return PTR_ERR(pdev);
        }

        return 0;
}
device_initcall(sbi_cpuidle_init);