qcom-cluster-lpm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sched/idle.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/time64.h>

#if defined(_TRACE_HOOK_PM_DOMAIN_H)
#include <trace/hooks/pm_domain.h>
#endif

#define CREATE_TRACE_POINTS
#include "trace-cluster-lpm.h"
#include "qcom-lpm.h"

LIST_HEAD(cluster_dev_list);

static struct lpm_cluster *to_cluster(struct generic_pm_domain *genpd)
{
	struct lpm_cluster *cluster_gov;

	list_for_each_entry(cluster_gov, &cluster_dev_list, list)
		if (cluster_gov->genpd == genpd)
			return cluster_gov;

	return NULL;
}
/**
 * clusttimer_fn() - Called when the cluster prediction timer expires
 * @h: Cluster prediction timer
 *
 * Return: HRTIMER_NORESTART, as the prediction timer is one-shot
 */
static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
{
	struct lpm_cluster *cluster_gov = container_of(h,
					struct lpm_cluster, histtimer);

	cluster_gov->history_invalid = true;

	return HRTIMER_NORESTART;
}
/**
 * clusttimer_start() - Program the cluster prediction hrtimer
 * @cluster_gov: Targeted cluster's lpm data structure
 * @time_us: Timer value to program, in microseconds
 */
static void clusttimer_start(struct lpm_cluster *cluster_gov, uint32_t time_us)
{
	struct hrtimer *timer = &cluster_gov->histtimer;
	/* Widen before multiplying so large values don't overflow 32 bits. */
	uint64_t time_ns = (uint64_t)time_us * NSEC_PER_USEC;
	ktime_t clust_ktime = ns_to_ktime(time_ns);

	timer->function = clusttimer_fn;
	hrtimer_start(timer, clust_ktime, HRTIMER_MODE_REL_PINNED);
}
/**
 * clusttimer_cancel() - Cancel the hrtimer after the cluster wakes from sleep
 * @cluster_gov: Targeted cluster's lpm data structure
 */
static void clusttimer_cancel(struct lpm_cluster *cluster_gov)
{
	ktime_t time_rem;

	time_rem = hrtimer_get_remaining(&cluster_gov->histtimer);
	if (ktime_to_us(time_rem) > 0)
		hrtimer_try_to_cancel(&cluster_gov->histtimer);
}
/**
 * cluster_predict() - Predict the cluster's next wakeup.
 * @cluster_gov: Targeted cluster's lpm data structure
 */
static void cluster_predict(struct lpm_cluster *cluster_gov)
{
	struct generic_pm_domain *genpd = cluster_gov->genpd;
	int i, j, idx = genpd->state_idx;
	int64_t cur_time = ktime_to_us(cluster_gov->now);
	uint64_t avg_residency = 0;

	cluster_gov->pred_wakeup = KTIME_MAX;
	cluster_gov->predicted = false;

	if (prediction_disabled)
		return;

	/*
	 * Samples are marked invalid when the wakeup was caused by the
	 * prediction timer, so do not predict.
	 */
	if (cluster_gov->history_invalid) {
		cluster_gov->history_invalid = false;
		cluster_gov->htmr_wkup = true;
		return;
	}

	/*
	 * The cluster wakes up whenever any core of the cluster wakes up.
	 * Since the last cluster LPM exit there could have been multiple
	 * core LPMs, so consider only recent history for the cluster.
	 */
	if (cluster_gov->nsamp == MAXSAMPLES) {
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cur_time - cluster_gov->history[i].entry_time)
					> CLUST_SMPL_INVLD_TIME)
				cluster_gov->nsamp--;
		}
	}

	/* Predict only when all the samples are collected. */
	if (cluster_gov->nsamp < MAXSAMPLES)
		return;

	/*
	 * If the cluster's last entered mode is a shallower state, calculate
	 * the next predicted wakeup as the average of the previous samples.
	 */
	if (idx < genpd->state_count - 1) {
		for (i = 0; i < MAXSAMPLES; i++)
			avg_residency += cluster_gov->history[i].residency;
		do_div(avg_residency, MAXSAMPLES);
		cluster_gov->pred_wakeup = ktime_add_us(cluster_gov->now,
							avg_residency);
		cluster_gov->predicted = true;
		return;
	}

	/*
	 * Find the number of premature exits for each mode, excluding the
	 * clock-gating mode. If they exceed the premature-exit threshold,
	 * restrict that mode and the deeper ones.
	 */
	for (j = 1; j < genpd->state_count; j++) {
		uint32_t count = 0;
		/* Divide once, outside the loop; do_div() modifies its
		 * argument and returns the remainder, so it must not be
		 * used inside the comparison below.
		 */
		uint64_t min_residency = genpd->states[j].residency_ns;

		do_div(min_residency, NSEC_PER_USEC);
		avg_residency = 0;
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cluster_gov->history[i].mode == j) &&
			    (cluster_gov->history[i].residency < min_residency)) {
				count++;
				avg_residency +=
					cluster_gov->history[i].residency;
			}
		}
		if (count > PRED_PREMATURE_CNT) {
			do_div(avg_residency, count);
			cluster_gov->pred_wakeup = ktime_add_us(cluster_gov->now,
								avg_residency);
			cluster_gov->predicted = true;
			return;
		}
	}
}
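
/*
 * Worked example of the averaging path above (illustrative only; assumes
 * MAXSAMPLES == 5, whose real value comes from qcom-lpm.h):
 *
 *	history[].residency = { 400, 600, 500, 700, 300 } us
 *	avg_residency = (400 + 600 + 500 + 700 + 300) / 5 = 500 us
 *	pred_wakeup = now + 500 us
 *
 * The genpd governor can then pick a state whose break-even residency is
 * below 500 us instead of committing to a deeper one.
 */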
/**
 * clear_cluster_history() - Clear the stored history samples.
 * It is called when the APSS is about to enter deep sleep.
 * @cluster_gov: Targeted cluster's lpm data structure
 */
static void clear_cluster_history(struct lpm_cluster *cluster_gov)
{
	int i;

	for (i = 0; i < MAXSAMPLES; i++) {
		cluster_gov->history[i].residency = 0;
		cluster_gov->history[i].mode = -1;
		cluster_gov->history[i].entry_time = 0;
	}

	cluster_gov->samples_idx = 0;
	cluster_gov->nsamp = 0;
	cluster_gov->history_invalid = false;
	cluster_gov->htmr_wkup = false;
}
/**
 * update_cluster_history() - Update the sample history each time the
 * cluster exits from sleep.
 * @cluster_gov: Targeted cluster's lpm data structure
 */
static void update_cluster_history(struct lpm_cluster *cluster_gov)
{
	bool tmr = false;
	uint32_t residency = 0;
	struct generic_pm_domain *genpd = cluster_gov->genpd;
	int idx = genpd->state_idx, samples_idx = cluster_gov->samples_idx;

	if (prediction_disabled)
		return;

	if ((cluster_gov->entry_idx == -1) || (cluster_gov->entry_idx == idx)) {
		residency = ktime_to_us(ktime_sub(cluster_gov->now,
						  cluster_gov->entry_time));
		cluster_gov->history[samples_idx].entry_time =
				ktime_to_us(cluster_gov->entry_time);
	} else {
		return;
	}

	if (cluster_gov->htmr_wkup) {
		/*
		 * This exit follows a prediction-timer wakeup: fold the
		 * residency back into the previous sample slot.
		 */
		if (!samples_idx)
			samples_idx = MAXSAMPLES - 1;
		else
			samples_idx--;

		cluster_gov->history[samples_idx].residency += residency;
		cluster_gov->htmr_wkup = false;
		tmr = true;
	} else {
		cluster_gov->history[samples_idx].residency = residency;
	}

	cluster_gov->history[samples_idx].mode = idx;
	cluster_gov->entry_idx = INT_MIN;
	cluster_gov->entry_time = 0;
	if (cluster_gov->nsamp < MAXSAMPLES)
		cluster_gov->nsamp++;

	trace_cluster_pred_hist(cluster_gov->history[samples_idx].mode,
				cluster_gov->history[samples_idx].residency,
				samples_idx, tmr);

	samples_idx++;
	if (samples_idx >= MAXSAMPLES)
		samples_idx = 0;
	cluster_gov->samples_idx = samples_idx;
}
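
/*
 * Illustrative trace of the htmr_wkup handling above (indices are
 * hypothetical): suppose samples_idx == 3 and the previous exit was forced
 * by the prediction timer. On this exit the residency is added into
 * history[2] rather than written to history[3], so one logical sleep that
 * was interrupted by the governor's own timer still counts as a single
 * sample.
 */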
/**
 * cluster_power_down() - Called when the cluster domain is about to power off.
 * If this entry's next wakeup was predicted, it programs the cluster
 * prediction timer and stores the entered idx and entry time of this lpm
 * in the cluster's private data structure.
 * @cluster_gov: cluster's lpm data structure
 */
static void cluster_power_down(struct lpm_cluster *cluster_gov)
{
	struct generic_pm_domain *genpd = cluster_gov->genpd;
	struct genpd_governor_data *gd = genpd->gd;
	int idx = genpd->state_idx;
	uint64_t residency;

	if (idx < 0)
		return;

	cluster_gov->entry_time = cluster_gov->now;
	cluster_gov->entry_idx = idx;
	trace_cluster_pred_select(genpd->state_idx, gd->next_wakeup,
				  0, cluster_gov->predicted,
				  cluster_gov->next_wakeup);

	if (idx >= genpd->state_count - 1) {
		clear_cpu_predict_history();
		clear_cluster_history(cluster_gov);
		return;
	}

	/* Arm the prediction timer only if the prediction drove the wakeup. */
	if (ktime_compare(cluster_gov->next_wakeup, cluster_gov->pred_wakeup))
		return;

	residency = genpd->states[idx + 1].residency_ns;
	do_div(residency, NSEC_PER_USEC);
	clusttimer_start(cluster_gov, residency + PRED_TIMER_ADD);
}
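
/*
 * Sketch of the timer rationale above: the prediction timer is armed for
 * the next-deeper state's break-even residency plus a small margin
 * (PRED_TIMER_ADD, defined in qcom-lpm.h). If it fires while the cluster
 * is still in LPM, the cluster slept longer than predicted; clusttimer_fn()
 * then marks the history invalid so that the post-timer residency is folded
 * back into the same sample on the following exit instead of skewing the
 * prediction as a fresh short sample.
 */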
/**
 * cluster_power_cb() - Called when the cluster domain powers off/on
 * @nb: notifier block of the cluster
 * @action: action, i.e. power_off/power_on
 * @data: pointer to private data structure
 *
 * Returns NOTIFY_OK or NOTIFY_BAD to the notifier call chain
 */
static int cluster_power_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct lpm_cluster *cluster_gov = container_of(nb, struct lpm_cluster,
						       genpd_nb);
	struct generic_pm_domain *pd = cluster_gov->genpd;
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	struct lpm_cpu *cpu_gov;
	int cpu;
	u32 *suspend_param = state->data;

	switch (action) {
	case GENPD_NOTIFY_ON:
		trace_cluster_exit(raw_smp_processor_id(), pd->state_idx,
				   *suspend_param);
		if (cluster_gov->genpd->suspended_count != 0)
			break;

		cluster_gov->now = ktime_get();
		clusttimer_cancel(cluster_gov);
		update_cluster_history(cluster_gov);
		cluster_predict(cluster_gov);
		break;
	case GENPD_NOTIFY_PRE_OFF:
		if (!pd->gd)
			return NOTIFY_BAD;

		if (!cluster_gov->state_allowed[pd->state_idx])
			return NOTIFY_BAD;

		if (cluster_gov->genpd->suspended_count != 0) {
			clear_cpu_predict_history();
			clear_cluster_history(cluster_gov);
			break;
		}

		for_each_cpu(cpu, cluster_gov->genpd->cpus) {
			if (cpu_online(cpu)) {
				cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
				if (cpu_gov->ipi_pending)
					return NOTIFY_BAD;
			}
		}
		cluster_gov->now = ktime_get();
		cluster_power_down(cluster_gov);
		break;
	case GENPD_NOTIFY_OFF:
		trace_cluster_enter(raw_smp_processor_id(), pd->state_idx,
				    *suspend_param);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
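
/*
 * Note on the notifier protocol: per the genpd power notifier contract,
 * returning NOTIFY_BAD from GENPD_NOTIFY_PRE_OFF vetoes the pending domain
 * power-off. That is how a state disallowed via sysfs, a missing governor
 * data pointer, or a pending IPI on any online CPU in the cluster keeps
 * the domain out of LPM. GENPD_NOTIFY_ON and GENPD_NOTIFY_OFF are
 * after-the-fact notifications and cannot veto anything.
 */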
/**
 * get_cluster_sleep_time() - Return the aggregated (earliest) next_wakeup
 * of all online cpus in this cluster domain.
 * @cluster_gov: Targeted cluster's lpm data structure
 */
ktime_t get_cluster_sleep_time(struct lpm_cluster *cluster_gov)
{
	int cpu;
	ktime_t next_wakeup, next_cpu_wakeup;
	struct generic_pm_domain *genpd = cluster_gov->genpd;

	next_wakeup = KTIME_MAX;
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		next_cpu_wakeup = cluster_gov->cpu_next_wakeup[cpu];
		if (ktime_before(next_cpu_wakeup, next_wakeup))
			next_wakeup = next_cpu_wakeup;
	}

	return next_wakeup;
}
/**
 * update_cluster_next_wakeup() - Update this cluster device's next wakeup
 * with the aggregated next_wakeup of all cpus in lpm for this cluster, or
 * the cluster's predicted next wakeup, whichever is earlier.
 * @cluster_gov: Targeted cluster's lpm data structure
 */
static void update_cluster_next_wakeup(struct lpm_cluster *cluster_gov)
{
	cluster_gov->next_wakeup = get_cluster_sleep_time(cluster_gov);
	if (cluster_gov->pred_wakeup) {
		if (ktime_before(cluster_gov->pred_wakeup,
				 cluster_gov->next_wakeup))
			cluster_gov->next_wakeup = cluster_gov->pred_wakeup;
	}

	dev_pm_genpd_set_next_wakeup(cluster_gov->dev,
				     cluster_gov->next_wakeup);
}
/**
 * update_cluster_select() - Called when a cpu is entering lpm to update its
 * next wakeup value in the corresponding cluster domain device.
 * @cpu_gov: CPU's lpm data structure.
 */
void update_cluster_select(struct lpm_cpu *cpu_gov)
{
	struct generic_pm_domain *genpd;
	struct lpm_cluster *cluster_gov;
	int cpu = cpu_gov->cpu;

	list_for_each_entry(cluster_gov, &cluster_dev_list, list) {
		if (!cluster_gov->initialized)
			continue;

		genpd = cluster_gov->genpd;
		if (cpumask_test_cpu(cpu, genpd->cpus)) {
			spin_lock(&cluster_gov->lock);
			cluster_gov->now = cpu_gov->now;
			cluster_gov->cpu_next_wakeup[cpu] = cpu_gov->next_wakeup;
			update_cluster_next_wakeup(cluster_gov);
			spin_unlock(&cluster_gov->lock);
		}
	}
}
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
static void android_vh_allow_domain_state(void *unused,
					  struct generic_pm_domain *genpd,
					  uint32_t idx, bool *allow)
{
	struct lpm_cluster *cluster_gov = to_cluster(genpd);

	if (!cluster_gov)
		return;

	*allow = cluster_gov->state_allowed[idx];
}
#endif

static void cluster_gov_disable(void)
{
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
	unregister_trace_android_vh_allow_domain_state(android_vh_allow_domain_state, NULL);
#endif
}

static void cluster_gov_enable(void)
{
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
	register_trace_android_vh_allow_domain_state(android_vh_allow_domain_state, NULL);
#endif
}
static struct cluster_governor gov_ops = {
	.select = update_cluster_select,
	.enable = cluster_gov_enable,
	.disable = cluster_gov_disable,
};
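
/*
 * These ops plug this driver into the CPU-level LPM governor (via
 * register_cluster_governor_ops(), presumably declared in qcom-lpm.h):
 * .select is invoked as each CPU enters LPM so the CPU's next wakeup can
 * be propagated to its cluster, while .enable/.disable toggle the vendor
 * hook that lets this governor veto individual domain states.
 */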
static int lpm_cluster_gov_remove(struct platform_device *pdev)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pdev->dev.pm_domain);
	struct lpm_cluster *cluster_gov = to_cluster(genpd);

	if (!cluster_gov)
		return -ENODEV;

	pm_runtime_disable(&pdev->dev);
	cluster_gov->genpd->flags &= ~GENPD_FLAG_MIN_RESIDENCY;
	remove_cluster_sysfs_nodes(cluster_gov);
	dev_pm_genpd_remove_notifier(cluster_gov->dev);

	return 0;
}
static int lpm_cluster_gov_probe(struct platform_device *pdev)
{
	int ret;
	int i;
	struct lpm_cluster *cluster_gov;

	cluster_gov = devm_kzalloc(&pdev->dev,
				   sizeof(struct lpm_cluster),
				   GFP_KERNEL);
	if (!cluster_gov)
		return -ENOMEM;

	spin_lock_init(&cluster_gov->lock);
	cluster_gov->dev = &pdev->dev;
	cluster_gov->pred_wakeup = KTIME_MAX;
	pm_runtime_enable(&pdev->dev);
	hrtimer_init(&cluster_gov->histtimer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	cluster_gov->genpd = pd_to_genpd(cluster_gov->dev->pm_domain);
	cluster_gov->genpd_nb.notifier_call = cluster_power_cb;
	cluster_gov->genpd->flags |= GENPD_FLAG_MIN_RESIDENCY;

	ret = dev_pm_genpd_add_notifier(cluster_gov->dev,
					&cluster_gov->genpd_nb);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	ret = create_cluster_sysfs_nodes(cluster_gov);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	list_add_tail(&cluster_gov->list, &cluster_dev_list);
	cluster_gov->initialized = true;
	for (i = 0; i < cluster_gov->genpd->state_count; i++)
		cluster_gov->state_allowed[i] = true;

	register_cluster_governor_ops(&gov_ops);

	return 0;
}
static const struct of_device_id qcom_cluster_lpm[] = {
	{ .compatible = "qcom,lpm-cluster-dev" },
	{ }
};

static struct platform_driver qcom_cluster_lpm_driver = {
	.probe = lpm_cluster_gov_probe,
	.remove = lpm_cluster_gov_remove,
	.driver = {
		.name = "qcom-cluster-lpm-gov",
		.of_match_table = qcom_cluster_lpm,
		.suppress_bind_attrs = true,
	},
};

void qcom_cluster_lpm_governor_deinit(void)
{
	platform_driver_unregister(&qcom_cluster_lpm_driver);
}

int qcom_cluster_lpm_governor_init(void)
{
	return platform_driver_register(&qcom_cluster_lpm_driver);
}
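
/*
 * A minimal sketch of how these entry points are expected to be wired up
 * from the common LPM module (hypothetical caller; the real init sequence
 * lives outside this file):
 *
 *	static int __init qcom_lpm_init(void)
 *	{
 *		return qcom_cluster_lpm_governor_init();
 *	}
 *	module_init(qcom_lpm_init);
 *
 *	static void __exit qcom_lpm_exit(void)
 *	{
 *		qcom_cluster_lpm_governor_deinit();
 *	}
 *	module_exit(qcom_lpm_exit);
 */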