// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2007 Adam Belay <[email protected]>
 * Copyright (C) 2009 Intel Corporation
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/sched/idle.h>
#if IS_ENABLED(CONFIG_SCHED_WALT)
#include <linux/sched/walt.h>
#endif
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/time64.h>

#include <trace/events/ipi.h>
#include <trace/events/power.h>
#include <trace/hooks/cpuidle.h>

#include "qcom-lpm.h"
#define CREATE_TRACE_POINTS
#include "trace-qcom-lpm.h"

#define LPM_PRED_RESET			0
#define LPM_PRED_RESIDENCY_PATTERN	1
#define LPM_PRED_PREMATURE_EXITS	2
#define LPM_PRED_IPI_PATTERN		3

#define LPM_SELECT_STATE_DISABLED	0
#define LPM_SELECT_STATE_QOS_UNMET	1
#define LPM_SELECT_STATE_RESIDENCY_UNMET	2
#define LPM_SELECT_STATE_PRED		3
#define LPM_SELECT_STATE_IPI_PENDING	4
#define LPM_SELECT_STATE_SCHED_BIAS	5
#define LPM_SELECT_STATE_MAX		7
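
/*
 * The "reason" value handed to trace_lpm_gov_select() packs one small
 * bitfield per idle state: state index i owns the MAX_LPM_CPUS-bit wide
 * field starting at bit (MAX_LPM_CPUS * i), and each LPM_SELECT_STATE_*
 * code sets one bit inside that field.
 */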
#define UPDATE_REASON(i, u)		(BIT(u) << (MAX_LPM_CPUS * i))

bool prediction_disabled;
bool sleep_disabled = true;
static bool suspend_in_progress;
static bool traces_registered;
static struct cluster_governor *cluster_gov_ops;

DEFINE_PER_CPU(struct lpm_cpu, lpm_cpu_data);

static inline bool check_cpu_isactive(int cpu)
{
	return cpu_active(cpu);
}
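
/*
 * lpm_disallowed() vetoes any low power mode entry while system suspend is
 * in progress, when sleep is globally disabled, or when the expected sleep
 * length is negative.  With CONFIG_SCHED_WALT it also honours the
 * scheduler's idle bias: the CPU is kept in the shallowest state and
 * cpu_gov->bias records how long the bias timer should run.
 */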
static bool lpm_disallowed(s64 sleep_ns, int cpu)
{
#if IS_ENABLED(CONFIG_SCHED_WALT)
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
	uint64_t bias_time = 0;
#endif

	if (suspend_in_progress)
		return true;

	if (!check_cpu_isactive(cpu))
		return false;

	if ((sleep_disabled || sleep_ns < 0))
		return true;

#if IS_ENABLED(CONFIG_SCHED_WALT)
	if (!sched_lpm_disallowed_time(cpu, &bias_time)) {
		cpu_gov->last_idx = 0;
		cpu_gov->bias = bias_time;
		return true;
	}
#endif

	return false;
}

/**
 * histtimer_fn() - Will be executed when per cpu prediction timer expires
 * @h: cpu prediction timer
 */
static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
{
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);

	cpu_gov->history_invalid = 1;

	return HRTIMER_NORESTART;
}

/**
 * histtimer_start() - Program the hrtimer with the given timer value
 * @time_us: Expiry value to be programmed, in microseconds
 */
static void histtimer_start(uint32_t time_us)
{
	ktime_t hist_ktime = ns_to_ktime(time_us * NSEC_PER_USEC);
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;

	cpu_histtimer->function = histtimer_fn;
	hrtimer_start(cpu_histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}

/**
 * histtimer_cancel() - Cancel the histtimer after cpu wakes up from lpm
 */
static void histtimer_cancel(void)
{
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;
	ktime_t time_rem;

	if (!hrtimer_active(cpu_histtimer))
		return;

	time_rem = hrtimer_get_remaining(cpu_histtimer);
	if (ktime_to_us(time_rem) <= 0)
		return;

	hrtimer_try_to_cancel(cpu_histtimer);
}
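
/*
 * The bias timer pairs with the scheduler bias reported by
 * sched_lpm_disallowed_time(): when the governor is forced to stay in the
 * shallowest state, it arms this timer for the bias window so the CPU is
 * guaranteed a wakeup once the bias has expired.  The handler itself has
 * nothing to do; the expiry is the wakeup.
 */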
static void biastimer_cancel(void)
{
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;
	ktime_t time_rem;

	if (!cpu_gov->bias)
		return;

	cpu_gov->bias = 0;
	time_rem = hrtimer_get_remaining(cpu_biastimer);
	if (ktime_to_us(time_rem) <= 0)
		return;

	hrtimer_try_to_cancel(cpu_biastimer);
}

static enum hrtimer_restart biastimer_fn(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void biastimer_start(uint32_t time_ns)
{
	ktime_t bias_ktime = ns_to_ktime(time_ns);
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;

	cpu_biastimer->function = biastimer_fn;
	hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
}
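
/*
 * Example (assuming MAXSAMPLES == 4): with a history of {2000, 2100, 1900,
 * 40000} us the first pass averages ~11500 us with a huge deviation, so the
 * largest sample is dropped; the second pass averages 2000 us with a small
 * deviation and that average becomes the predicted sleep time.
 */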

/**
 * find_deviation() - Try to detect repeating patterns by keeping track of
 *		      past samples and checking whether the standard deviation
 *		      of that set of previous samples is below a threshold.
 *		      If it is, the average of those past samples is used as
 *		      the predicted value.
 * @cpu_gov: targeted cpu's lpm data structure
 * @samples_history: history samples to analyse (residencies or IPI intervals)
 * @duration_ns: cpu's scheduler sleep length
 */
static uint64_t find_deviation(struct lpm_cpu *cpu_gov, int *samples_history,
			       u64 duration_ns)
{
	uint64_t max, avg, stddev;
	uint64_t thresh = LLONG_MAX;
	struct cpuidle_driver *drv = cpu_gov->drv;
	int divisor, i, last_level = drv->state_count - 1;
	struct cpuidle_state *max_state = &drv->states[last_level];

	do {
		max = avg = divisor = stddev = 0;
		for (i = 0; i < MAXSAMPLES; i++) {
			int64_t value = samples_history[i];

			if (value <= thresh) {
				avg += value;
				divisor++;
				if (value > max)
					max = value;
			}
		}
		do_div(avg, divisor);

		for (i = 0; i < MAXSAMPLES; i++) {
			int64_t value = samples_history[i];

			if (value <= thresh) {
				int64_t diff = value - avg;

				stddev += diff * diff;
			}
		}
		do_div(stddev, divisor);
		stddev = int_sqrt(stddev);

		/*
		 * If the deviation is small enough, return the average;
		 * otherwise drop the largest sample and retry.
		 */
		if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
					|| stddev <= PRED_REF_STDDEV) {
			do_div(duration_ns, NSEC_PER_USEC);
			if (avg >= duration_ns ||
			    avg > max_state->target_residency)
				return 0;

			cpu_gov->next_pred_time = ktime_to_us(cpu_gov->now) + avg;
			return avg;
		}
		thresh = max - 1;

	} while (divisor > (MAXSAMPLES - 1));

	return 0;
}

/**
 * cpu_predict() - Predict the cpu's next wakeup.
 * @cpu_gov: targeted cpu's lpm data structure
 * @duration_ns: cpu's scheduler sleep length
 */
static void cpu_predict(struct lpm_cpu *cpu_gov, u64 duration_ns)
{
	int i, j;
	struct cpuidle_driver *drv = cpu_gov->drv;
	struct cpuidle_state *min_state = &drv->states[0];
	struct history_lpm *lpm_history = &cpu_gov->lpm_history;
	struct history_ipi *ipi_history = &cpu_gov->ipi_history;

	if (prediction_disabled)
		return;

	/*
	 * The history is marked invalid when the wakeup was caused by the
	 * prediction timer itself, so do not predict.
	 */
	if (cpu_gov->history_invalid) {
		cpu_gov->history_invalid = false;
		cpu_gov->htmr_wkup = true;
		cpu_gov->next_pred_time = 0;
		return;
	}

	/*
	 * If duration_ns itself is not long enough for low power modes
	 * deeper than clock gating, do not predict.
	 */
	if (min_state->target_residency_ns > duration_ns)
		return;

	/* Predict only when all the samples are collected */
	if (lpm_history->nsamp < MAXSAMPLES) {
		cpu_gov->next_pred_time = 0;
		return;
	}

	/*
	 * Check whether the samples deviate much; if not, use their average
	 * as the predicted sleep time.
	 */
	cpu_gov->predicted = find_deviation(cpu_gov, lpm_history->resi, duration_ns);
	if (cpu_gov->predicted) {
		cpu_gov->pred_type = LPM_PRED_RESIDENCY_PATTERN;
		return;
	}

	/*
	 * Count the premature exits for each mode, excluding the clock
	 * gating mode.  If a mode has at least PRED_PREMATURE_CNT premature
	 * exits, predict its average residency so that mode and the deeper
	 * ones are avoided.
	 */
	for (j = 1; j < drv->state_count; j++) {
		struct cpuidle_state *s = &drv->states[j];
		uint32_t min_residency = s->target_residency;
		uint32_t count = 0;
		uint64_t avg_residency = 0;

		for (i = 0; i < MAXSAMPLES; i++) {
			if ((lpm_history->mode[i] == j) &&
			    (lpm_history->resi[i] < min_residency)) {
				count++;
				avg_residency += lpm_history->resi[i];
			}
		}

		if (count >= PRED_PREMATURE_CNT) {
			do_div(avg_residency, count);
			cpu_gov->predicted = avg_residency;
			cpu_gov->next_pred_time = ktime_to_us(cpu_gov->now)
						  + cpu_gov->predicted;
			cpu_gov->pred_type = LPM_PRED_PREMATURE_EXITS;
			break;
		}
	}

	if (cpu_gov->predicted)
		return;

	cpu_gov->predicted = find_deviation(cpu_gov, ipi_history->interval,
					    duration_ns);
	if (cpu_gov->predicted)
		cpu_gov->pred_type = LPM_PRED_IPI_PATTERN;
}

/**
 * clear_cpu_predict_history() - Clear the stored samples of every cpu.
 *				 Called when the APSS is about to enter
 *				 deep sleep.
 */
void clear_cpu_predict_history(void)
{
	struct lpm_cpu *cpu_gov;
	struct history_lpm *lpm_history;
	int i, cpu;

	if (prediction_disabled)
		return;

	for_each_possible_cpu(cpu) {
		cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
		lpm_history = &cpu_gov->lpm_history;
		for (i = 0; i < MAXSAMPLES; i++) {
			lpm_history->resi[i] = 0;
			lpm_history->mode[i] = -1;
			lpm_history->samples_idx = 0;
			lpm_history->nsamp = 0;
			cpu_gov->next_pred_time = 0;
			cpu_gov->pred_type = LPM_PRED_RESET;
		}
	}
}

/**
 * update_cpu_history() - Update the sample history each time the cpu
 *			  comes out of sleep.
 * @cpu_gov: targeted cpu's lpm data structure
 */
static void update_cpu_history(struct lpm_cpu *cpu_gov)
{
	bool tmr = false;
	int idx = cpu_gov->last_idx;
	struct history_lpm *lpm_history = &cpu_gov->lpm_history;
	u64 measured_us = ktime_to_us(cpu_gov->dev->last_residency_ns);
	struct cpuidle_state *target;

	if (sleep_disabled || prediction_disabled || idx < 0 ||
	    idx > cpu_gov->drv->state_count - 1)
		return;

	target = &cpu_gov->drv->states[idx];

	if (measured_us > target->exit_latency)
		measured_us -= target->exit_latency;

	if (cpu_gov->htmr_wkup) {
		/*
		 * The previous sleep was cut short by the prediction timer,
		 * so fold this residency into the previous sample instead
		 * of recording a new one.
		 */
		if (!lpm_history->samples_idx)
			lpm_history->samples_idx = MAXSAMPLES - 1;
		else
			lpm_history->samples_idx--;

		lpm_history->resi[lpm_history->samples_idx] += measured_us;
		cpu_gov->htmr_wkup = false;
		tmr = true;
	} else
		lpm_history->resi[lpm_history->samples_idx] = measured_us;

	lpm_history->mode[lpm_history->samples_idx] = idx;
	cpu_gov->pred_type = LPM_PRED_RESET;

	trace_gov_pred_hist(idx, lpm_history->resi[lpm_history->samples_idx],
			    tmr);

	if (lpm_history->nsamp < MAXSAMPLES)
		lpm_history->nsamp++;

	lpm_history->samples_idx++;
	if (lpm_history->samples_idx >= MAXSAMPLES)
		lpm_history->samples_idx = 0;
}
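
/*
 * Record the interval since the last IPI sent to this cpu; cpu_predict()
 * runs the same deviation check over these intervals to catch periodic
 * IPI wakeups.
 */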
void update_ipi_history(int cpu)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
	struct history_ipi *history = &cpu_gov->ipi_history;
	ktime_t now = ktime_get();

	history->interval[history->current_ptr] =
			ktime_to_us(ktime_sub(now,
			history->cpu_idle_resched_ts));
	(history->current_ptr)++;
	if (history->current_ptr >= MAXSAMPLES)
		history->current_ptr = 0;

	history->cpu_idle_resched_ts = now;
}

/**
 * lpm_cpu_qos_notify() - Called when a new PM QoS request arrives for the cpu.
 *			  Wakes up the cpu if it is idle so that the new
 *			  PM QoS request is honoured.
 * @nfb: notifier block of the CPU
 * @val: notification value
 * @ptr: pointer to private data structure
 */
static int lpm_cpu_qos_notify(struct notifier_block *nfb,
			      unsigned long val, void *ptr)
{
	struct lpm_cpu *cpu_gov = container_of(nfb, struct lpm_cpu, nb);
	int cpu = cpu_gov->cpu;

	if (!cpu_gov->enable)
		return NOTIFY_OK;

	preempt_disable();
	if (cpu != smp_processor_id() && cpu_online(cpu) &&
	    check_cpu_isactive(cpu))
		wake_up_if_idle(cpu);
	preempt_enable();

	return NOTIFY_OK;
}

static int lpm_offline_cpu(unsigned int cpu)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
	struct device *dev = get_cpu_device(cpu);

	if (!dev || !cpu_gov)
		return 0;

	dev_pm_qos_remove_notifier(dev, &cpu_gov->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	return 0;
}

static int lpm_online_cpu(unsigned int cpu)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
	struct device *dev = get_cpu_device(cpu);

	if (!dev || !cpu_gov)
		return 0;

	cpu_gov->nb.notifier_call = lpm_cpu_qos_notify;
	dev_pm_qos_add_notifier(dev, &cpu_gov->nb,
				DEV_PM_QOS_RESUME_LATENCY);

	return 0;
}
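
/*
 * ipi_raise()/ipi_entry() hook the IPI tracepoints: ipi_pending is set for
 * every target cpu when an IPI is raised and cleared when the IPI handler
 * runs.  While it is set, lpm_idle_enter() demotes the selected state to
 * WFI so the cpu does not enter a deep state with an interrupt on the way.
 */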
static void ipi_raise(void *ignore, const struct cpumask *mask, const char *unused)
{
	int cpu;
	struct lpm_cpu *cpu_gov;
	unsigned long flags;

	if (suspend_in_progress)
		return;

	for_each_cpu(cpu, mask) {
		cpu_gov = &(per_cpu(lpm_cpu_data, cpu));
		if (!cpu_gov->enable)
			return;

		spin_lock_irqsave(&cpu_gov->lock, flags);
		cpu_gov->ipi_pending = true;
		spin_unlock_irqrestore(&cpu_gov->lock, flags);
		update_ipi_history(cpu);
	}
}

static void ipi_entry(void *ignore, const char *unused)
{
	int cpu;
	struct lpm_cpu *cpu_gov;
	unsigned long flags;

	if (suspend_in_progress)
		return;

	cpu = raw_smp_processor_id();
	cpu_gov = &(per_cpu(lpm_cpu_data, cpu));
	if (!cpu_gov->enable)
		return;

	spin_lock_irqsave(&cpu_gov->lock, flags);
	cpu_gov->ipi_pending = false;
	spin_unlock_irqrestore(&cpu_gov->lock, flags);
}

/**
 * get_cpus_qos() - Returns the aggregated PM QoS request.
 * @mask: cpumask of the cpus
 */
static inline s64 get_cpus_qos(const struct cpumask *mask)
{
	int cpu;
	s64 n, latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE * NSEC_PER_USEC;

	for_each_cpu(cpu, mask) {
		if (!check_cpu_isactive(cpu))
			continue;
		n = cpuidle_governor_latency_req(cpu);
		if (n < latency)
			latency = n;
	}

	return latency;
}

/**
 * start_prediction_timer() - Program the prediction hrtimer.  On a
 *			      misprediction it wakes the cpu out of the
 *			      shallower state so that power is not wasted
 *			      by staying in that state for too long.
 * @cpu_gov: cpu's lpm data structure
 * @duration_us: cpu's scheduled sleep length
 */
static int start_prediction_timer(struct lpm_cpu *cpu_gov, int duration_us)
{
	struct cpuidle_state *s;
	uint32_t htime = 0, max_residency;
	uint32_t last_level = cpu_gov->drv->state_count - 1;

	if (!cpu_gov->predicted || cpu_gov->last_idx >= last_level)
		return 0;

	if (cpu_gov->next_wakeup > cpu_gov->next_pred_time)
		cpu_gov->next_wakeup = cpu_gov->next_pred_time;

	s = &cpu_gov->drv->states[0];
	max_residency = s[cpu_gov->last_idx + 1].target_residency - 1;
	htime = cpu_gov->predicted + PRED_TIMER_ADD;

	if (htime > max_residency)
		htime = max_residency;

	if ((duration_us > htime) && ((duration_us - htime) > max_residency))
		histtimer_start(htime);

	return htime;
}

void register_cluster_governor_ops(struct cluster_governor *ops)
{
	if (!ops)
		return;

	cluster_gov_ops = ops;
}
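
/*
 * State selection walks the states from deepest to shallowest and records,
 * per state, why it was skipped (disabled, QoS unmet, residency unmet or a
 * shorter predicted sleep) so the reason bitmap can be traced alongside the
 * final choice.
 */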

/**
 * lpm_select() - Find the best idle state for the cpu device
 * @drv: cpuidle driver
 * @dev: Target cpu
 * @stop_tick: Is the tick device stopped
 *
 * Return: Best cpu LPM mode to enter
 */
static int lpm_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		      bool *stop_tick)
{
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	s64 latency_req = get_cpus_qos(cpumask_of(dev->cpu));
	ktime_t delta_tick;
	u64 reason = 0;
	uint64_t duration_ns, htime = 0;
	int i = 0;

	if (!cpu_gov)
		return 0;

	do_div(latency_req, NSEC_PER_USEC);
	cpu_gov->predicted = 0;
	cpu_gov->predict_started = false;
	cpu_gov->now = ktime_get();
	duration_ns = tick_nohz_get_sleep_length(&delta_tick);
	update_cpu_history(cpu_gov);

	if (lpm_disallowed(duration_ns, dev->cpu))
		goto done;

	for (i = drv->state_count - 1; i > 0; i--) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable) {
			reason |= UPDATE_REASON(i, LPM_SELECT_STATE_DISABLED);
			continue;
		}

		if (latency_req < s->exit_latency) {
			reason |= UPDATE_REASON(i, LPM_SELECT_STATE_QOS_UNMET);
			continue;
		}

		if (s->target_residency_ns > duration_ns) {
			reason |= UPDATE_REASON(i,
					LPM_SELECT_STATE_RESIDENCY_UNMET);
			continue;
		}

		if (check_cpu_isactive(dev->cpu) && !cpu_gov->predict_started) {
			cpu_predict(cpu_gov, duration_ns);
			cpu_gov->predict_started = true;
		}

		if (cpu_gov->predicted)
			if (s->target_residency > cpu_gov->predicted) {
				reason |= UPDATE_REASON(i,
						LPM_SELECT_STATE_PRED);
				continue;
			}

		break;
	}

	do_div(duration_ns, NSEC_PER_USEC);
	cpu_gov->last_idx = i;
	cpu_gov->next_wakeup = ktime_add_us(cpu_gov->now, duration_ns);
	htime = start_prediction_timer(cpu_gov, duration_ns);

	/* update this cpu's next_wakeup in its parent power domain device */
	if (cpu_gov->last_idx == drv->state_count - 1) {
		if (cluster_gov_ops && cluster_gov_ops->select)
			cluster_gov_ops->select(cpu_gov);
	}

done:
	if ((!cpu_gov->last_idx) && cpu_gov->bias) {
		biastimer_start(cpu_gov->bias);
		reason |= UPDATE_REASON(i, LPM_SELECT_STATE_SCHED_BIAS);
	}

	trace_lpm_gov_select(i, latency_req, duration_ns, reason);
	trace_gov_pred_select(cpu_gov->pred_type, cpu_gov->predicted, htime);

	return i;
}

/**
 * lpm_reflect() - Update the state entered by the cpu device
 * @dev: Target CPU
 * @state: Entered state
 */
static void lpm_reflect(struct cpuidle_device *dev, int state)
{
}

/**
 * lpm_idle_enter() - Notification with cpuidle state during idle entry
 * @unused: unused
 * @state: selected state by governor's .select
 * @dev: cpuidle_device
 */
static void lpm_idle_enter(void *unused, int *state, struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
	u64 reason = 0;
	unsigned long flags;

	if (*state == 0)
		return;

	if (!cpu_gov->enable)
		return;

	/* Restrict to WFI state if there is an IPI pending on current CPU */
	spin_lock_irqsave(&cpu_gov->lock, flags);
	if (cpu_gov->ipi_pending) {
		reason = UPDATE_REASON(*state, LPM_SELECT_STATE_IPI_PENDING);
		*state = 0;
		trace_lpm_gov_select(*state, 0xdeaffeed, 0xdeaffeed, reason);
	}
	spin_unlock_irqrestore(&cpu_gov->lock, flags);
}

/**
 * lpm_idle_exit() - Notification with cpuidle state during idle exit
 * @unused: unused
 * @state: actual entered state by cpuidle
 * @dev: cpuidle_device
 */
static void lpm_idle_exit(void *unused, int state, struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);

	if (cpu_gov->enable) {
		histtimer_cancel();
		biastimer_cancel();
	}
}
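
/*
 * The IPI and vendor-hook tracepoints are registered once, when the first
 * cpu enables this governor, and unregistered again in lpm_disable_device()
 * once the last cpu has disabled it (tracked via traces_registered).
 */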

/**
 * lpm_enable_device() - Initialize the governor's data for the CPU
 * @drv: cpuidle driver
 * @dev: Target CPU
 */
static int lpm_enable_device(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);
	struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;
	struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;
	int ret;

	spin_lock_init(&cpu_gov->lock);
	hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(cpu_biastimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	if (!traces_registered) {
		ret = register_trace_ipi_raise(ipi_raise, NULL);
		if (ret)
			return ret;

		ret = register_trace_ipi_entry(ipi_entry, NULL);
		if (ret) {
			unregister_trace_ipi_raise(ipi_raise, NULL);
			return ret;
		}

		ret = register_trace_prio_android_vh_cpu_idle_enter(
					lpm_idle_enter, NULL, INT_MIN);
		if (ret) {
			unregister_trace_ipi_raise(ipi_raise, NULL);
			unregister_trace_ipi_entry(ipi_entry, NULL);
			return ret;
		}

		ret = register_trace_prio_android_vh_cpu_idle_exit(
					lpm_idle_exit, NULL, INT_MIN);
		if (ret) {
			unregister_trace_ipi_raise(ipi_raise, NULL);
			unregister_trace_ipi_entry(ipi_entry, NULL);
			unregister_trace_android_vh_cpu_idle_enter(
					lpm_idle_enter, NULL);
			return ret;
		}

		if (cluster_gov_ops && cluster_gov_ops->enable)
			cluster_gov_ops->enable();

		traces_registered = true;
	}

	cpu_gov->cpu = dev->cpu;
	cpu_gov->enable = true;
	cpu_gov->drv = drv;
	cpu_gov->dev = dev;
	cpu_gov->last_idx = -1;

	return 0;
}

/**
 * lpm_disable_device() - Clean up the governor's data for the CPU
 * @drv: cpuidle driver
 * @dev: Target CPU
 */
static void lpm_disable_device(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);
	int cpu;

	cpu_gov->enable = false;
	cpu_gov->last_idx = -1;
	for_each_possible_cpu(cpu) {
		struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);

		if (cpu_gov->enable)
			return;
	}

	if (traces_registered) {
		unregister_trace_ipi_raise(ipi_raise, NULL);
		unregister_trace_ipi_entry(ipi_entry, NULL);
		unregister_trace_android_vh_cpu_idle_enter(
					lpm_idle_enter, NULL);
		unregister_trace_android_vh_cpu_idle_exit(
					lpm_idle_exit, NULL);

		if (cluster_gov_ops && cluster_gov_ops->disable)
			cluster_gov_ops->disable();

		traces_registered = false;
	}
}

static void qcom_lpm_suspend_trace(void *unused, const char *action,
				   int event, bool start)
{
	int cpu;

	if (start && !strcmp("dpm_suspend_late", action)) {
		suspend_in_progress = true;

		for_each_online_cpu(cpu)
			wake_up_if_idle(cpu);
		return;
	}

	if (!start && !strcmp("dpm_resume_early", action)) {
		suspend_in_progress = false;

		for_each_online_cpu(cpu)
			wake_up_if_idle(cpu);
	}
}
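
/*
 * A rating of 50 is higher than that of the built-in cpuidle governors, so
 * this governor is picked automatically once it is registered.
 */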
static struct cpuidle_governor lpm_governor = {
	.name =		"qcom-cpu-lpm",
	.rating =	50,
	.enable =	lpm_enable_device,
	.disable =	lpm_disable_device,
	.select =	lpm_select,
	.reflect =	lpm_reflect,
};

static int __init qcom_lpm_governor_init(void)
{
	int ret;

	ret = create_global_sysfs_nodes();
	if (ret)
		goto sysfs_fail;

	ret = qcom_cluster_lpm_governor_init();
	if (ret)
		goto cluster_init_fail;

	ret = cpuidle_register_governor(&lpm_governor);
	if (ret)
		goto cpuidle_reg_fail;

	ret = register_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
	if (ret)
		goto cpuidle_reg_fail;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "qcom-cpu-lpm",
				lpm_online_cpu, lpm_offline_cpu);
	if (ret < 0)
		goto cpuhp_setup_fail;

	return 0;

cpuhp_setup_fail:
	unregister_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
cpuidle_reg_fail:
	qcom_cluster_lpm_governor_deinit();
cluster_init_fail:
	remove_global_sysfs_nodes();
sysfs_fail:
	return ret;
}
module_init(qcom_lpm_governor_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. cpuidle LPM governor");
MODULE_LICENSE("GPL v2");