// SPDX-License-Identifier: GPL-2.0-only
/*
 * PM domains for CPUs via genpd.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <[email protected]>
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */
#define pr_fmt(fmt) "dt-idle-genpd: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "dt_idle_genpd.h"
  20. static int pd_parse_state_nodes(
  21. int (*parse_state)(struct device_node *, u32 *),
  22. struct genpd_power_state *states, int state_count)
  23. {
  24. int i, ret;
  25. u32 state, *state_buf;
  26. for (i = 0; i < state_count; i++) {
  27. ret = parse_state(to_of_node(states[i].fwnode), &state);
  28. if (ret)
  29. goto free_state;
  30. state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
  31. if (!state_buf) {
  32. ret = -ENOMEM;
  33. goto free_state;
  34. }
  35. *state_buf = state;
  36. states[i].data = state_buf;
  37. }
  38. return 0;
  39. free_state:
  40. i--;
  41. for (; i >= 0; i--)
  42. kfree(states[i].data);
  43. return ret;
  44. }
  45. static int pd_parse_states(struct device_node *np,
  46. int (*parse_state)(struct device_node *, u32 *),
  47. struct genpd_power_state **states,
  48. int *state_count)
  49. {
  50. int ret;
  51. /* Parse the domain idle states. */
  52. ret = of_genpd_parse_idle_states(np, states, state_count);
  53. if (ret)
  54. return ret;
  55. /* Fill out the dt specifics for each found state. */
  56. ret = pd_parse_state_nodes(parse_state, *states, *state_count);
  57. if (ret)
  58. kfree(*states);
  59. return ret;
  60. }
  61. static void pd_free_states(struct genpd_power_state *states,
  62. unsigned int state_count)
  63. {
  64. int i;
  65. for (i = 0; i < state_count; i++)
  66. kfree(states[i].data);
  67. kfree(states);
  68. }
  69. void dt_idle_pd_free(struct generic_pm_domain *pd)
  70. {
  71. pd_free_states(pd->states, pd->state_count);
  72. kfree(pd->name);
  73. kfree(pd);
  74. }
  75. struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
  76. int (*parse_state)(struct device_node *, u32 *))
  77. {
  78. struct generic_pm_domain *pd;
  79. struct genpd_power_state *states = NULL;
  80. int ret, state_count = 0;
  81. pd = kzalloc(sizeof(*pd), GFP_KERNEL);
  82. if (!pd)
  83. goto out;
  84. pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
  85. if (!pd->name)
  86. goto free_pd;
  87. /*
  88. * Parse the domain idle states and let genpd manage the state selection
  89. * for those being compatible with "domain-idle-state".
  90. */
  91. ret = pd_parse_states(np, parse_state, &states, &state_count);
  92. if (ret)
  93. goto free_name;
  94. pd->free_states = pd_free_states;
  95. pd->name = kbasename(pd->name);
  96. pd->states = states;
  97. pd->state_count = state_count;
  98. pr_debug("alloc PM domain %s\n", pd->name);
  99. return pd;
  100. free_name:
  101. kfree(pd->name);
  102. free_pd:
  103. kfree(pd);
  104. out:
  105. pr_err("failed to alloc PM domain %pOF\n", np);
  106. return NULL;
  107. }
  108. int dt_idle_pd_init_topology(struct device_node *np)
  109. {
  110. struct device_node *node;
  111. struct of_phandle_args child, parent;
  112. int ret;
  113. for_each_child_of_node(np, node) {
  114. if (of_parse_phandle_with_args(node, "power-domains",
  115. "#power-domain-cells", 0, &parent))
  116. continue;
  117. child.np = node;
  118. child.args_count = 0;
  119. ret = of_genpd_add_subdomain(&parent, &child);
  120. of_node_put(parent.np);
  121. if (ret) {
  122. of_node_put(node);
  123. return ret;
  124. }
  125. }
  126. return 0;
  127. }
  128. int dt_idle_pd_remove_topology(struct device_node *np)
  129. {
  130. struct device_node *node;
  131. struct of_phandle_args child, parent;
  132. int ret;
  133. for_each_child_of_node(np, node) {
  134. if (of_parse_phandle_with_args(node, "power-domains",
  135. "#power-domain-cells", 0, &parent))
  136. continue;
  137. child.np = node;
  138. child.args_count = 0;
  139. ret = of_genpd_remove_subdomain(&parent, &child);
  140. of_node_put(parent.np);
  141. if (ret) {
  142. of_node_put(node);
  143. return ret;
  144. }
  145. }
  146. return 0;
  147. }
  148. struct device *dt_idle_attach_cpu(int cpu, const char *name)
  149. {
  150. struct device *dev;
  151. dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name);
  152. if (IS_ERR_OR_NULL(dev))
  153. return dev;
  154. pm_runtime_irq_safe(dev);
  155. if (cpu_online(cpu))
  156. pm_runtime_get_sync(dev);
  157. dev_pm_syscore_device(dev, true);
  158. return dev;
  159. }
  160. void dt_idle_detach_cpu(struct device *dev)
  161. {
  162. if (IS_ERR_OR_NULL(dev))
  163. return;
  164. dev_pm_domain_detach(dev, false);
  165. }